[automerger skipped] [Cherry-pick] Increase post-reboot sleep am: b96236cf75 -s ours
am skip reason: Merged-In I978e2c37cb65afde3cf4b9d43297c48cb822aedb with SHA-1 8f4fc2b1db is already in history
Original change: https://android-review.googlesource.com/c/platform/packages/modules/StatsD/+/2806054
Change-Id: I15133d8aeacd45bd3b77c107f25aae654c58b316
Signed-off-by: Automerger Merge Worker <[email protected]>
diff --git a/.gitignore b/.gitignore
index ccff052..3c2d0cd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,3 +6,8 @@
**/.idea
**/*.iml
**/*.ipr
+
+# VSCode
+**/.vscode/
+**/packages.modules.StatsD.code-workspace
+
diff --git a/TEST_MAPPING b/TEST_MAPPING
index 73434a3..121ca17 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -27,7 +27,7 @@
"name" : "statsd_test"
}
],
- "hwasan-postsubmit" : [
+ "hwasan-presubmit" : [
{
"name" : "FrameworkStatsdTest"
},
diff --git a/aidl/Android.bp b/aidl/Android.bp
index cea9739..c79f0b8 100644
--- a/aidl/Android.bp
+++ b/aidl/Android.bp
@@ -26,11 +26,14 @@
name: "statsd-aidl",
unstable: true,
srcs: [
+ "android/os/StatsSubscriptionCallbackReason.aidl",
+ "android/os/IStatsSubscriptionCallback.aidl",
"android/os/IPendingIntentRef.aidl",
"android/os/IPullAtomCallback.aidl",
"android/os/IPullAtomResultReceiver.aidl",
"android/os/IStatsCompanionService.aidl",
"android/os/IStatsd.aidl",
+ "android/os/IStatsQueryCallback.aidl",
"android/os/StatsDimensionsValueParcel.aidl",
"android/util/PropertyParcel.aidl",
"android/util/StatsEventParcel.aidl",
@@ -46,9 +49,6 @@
ndk: {
enabled: true,
apex_available: [
- // TODO(b/145923087): Remove this once statsd binary is in apex
- "//apex_available:platform",
-
"com.android.os.statsd",
"test_com.android.os.statsd",
],
diff --git a/aidl/android/os/IPendingIntentRef.aidl b/aidl/android/os/IPendingIntentRef.aidl
index 000a699..5a0f5d7 100644
--- a/aidl/android/os/IPendingIntentRef.aidl
+++ b/aidl/android/os/IPendingIntentRef.aidl
@@ -43,4 +43,10 @@
oneway void sendSubscriberBroadcast(long configUid, long configId, long subscriptionId,
long subscriptionRuleId, in String[] cookies,
in StatsDimensionsValueParcel dimensionsValueParcel);
+
+ /**
+ * Send a broadcast to the specified PendingIntent notifying it that the list of restricted
+ * metrics has changed.
+ */
+ oneway void sendRestrictedMetricsChangedBroadcast(in long[] metricIds);
}
diff --git a/aidl/android/os/IStatsManagerService.aidl b/aidl/android/os/IStatsManagerService.aidl
index b59a97e..ef1e4c6 100644
--- a/aidl/android/os/IStatsManagerService.aidl
+++ b/aidl/android/os/IStatsManagerService.aidl
@@ -18,6 +18,7 @@
import android.app.PendingIntent;
import android.os.IPullAtomCallback;
+import android.os.IStatsQueryCallback;
/**
* Binder interface to communicate with the Java-based statistics service helper.
@@ -131,6 +132,31 @@
oneway void registerPullAtomCallback(int atomTag, long coolDownMillis, long timeoutMillis,
in int[] additiveFields, IPullAtomCallback pullerCallback);
- /** Tell StatsManagerService to unregister the pulller for the given atom tag from statsd. */
+ /** Tell StatsManagerService to unregister the puller for the given atom tag from statsd. */
oneway void unregisterPullAtomCallback(int atomTag);
+
+ /** Section for restricted-logging methods. */
+
+    /** Queries data from the underlying statsd SQL store. */
+ oneway void querySql(in String sqlQuery, in int minSqlClientVersion,
+ in @nullable byte[] policyConfig, in IStatsQueryCallback queryCallback,
+ in long configKey, in String configPackage);
+
+ /**
+     * Registers the operation that is called whenever there is a change in the restricted metrics
+     * present for this client for a specified config. This operation allows statsd to
+     * inform the client about the current restricted metrics available to be queried for
+     * the specified config.
+ *
+ * Requires Manifest.permission.READ_RESTRICTED_STATS.
+ */
+ long[] setRestrictedMetricsChangedOperation(in PendingIntent pendingIntent, in long configKey,
+ in String configPackage);
+
+ /**
+ * Removes the restricted metrics changed operation for the specified config key/package.
+ *
+ * Requires Manifest.permission.READ_RESTRICTED_STATS.
+ */
+ void removeRestrictedMetricsChangedOperation(in long configKey, in String configPackage);
}
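
The restricted-metrics methods added to IStatsManagerService above back the StatsManager
system API introduced later in this change. A minimal usage sketch follows, assuming a caller
that holds READ_RESTRICTED_STATS; the config key, config package and broadcast action below
are hypothetical placeholders, not values taken from this change.

import android.app.PendingIntent;
import android.app.StatsManager;
import android.content.Context;
import android.content.Intent;

/** Sketch only; not part of this change. */
class RestrictedMetricsListenerExample {
    // Hypothetical values identifying a config previously added via StatsManager#addConfig.
    private static final long CONFIG_KEY = 1234L;
    private static final String CONFIG_PACKAGE = "com.example.configowner";

    static long[] register(Context context) throws StatsManager.StatsUnavailableException {
        StatsManager statsManager = context.getSystemService(StatsManager.class);
        PendingIntent pi = PendingIntent.getBroadcast(
                context, /* requestCode= */ 0,
                new Intent("com.example.RESTRICTED_METRICS_CHANGED"), // hypothetical action
                PendingIntent.FLAG_IMMUTABLE | PendingIntent.FLAG_UPDATE_CURRENT);
        // Returns the restricted metric ids currently queryable for this config; the
        // PendingIntent is later fired with EXTRA_STATS_RESTRICTED_METRIC_IDS on changes.
        // Passing null instead of pi removes the operation again.
        return statsManager.setRestrictedMetricsChangedOperation(CONFIG_KEY, CONFIG_PACKAGE, pi);
    }
}
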
diff --git a/aidl/android/os/IStatsQueryCallback.aidl b/aidl/android/os/IStatsQueryCallback.aidl
new file mode 100644
index 0000000..911c816
--- /dev/null
+++ b/aidl/android/os/IStatsQueryCallback.aidl
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.os;
+
+/**
+ * Binder interface to hold a Callback for Stats SQL queries.
+ * {@hide}
+ */
+interface IStatsQueryCallback {
+ oneway void sendResults(in String[] queryData, in String[] columnNames,
+ in int[] columnTypes, int rowCount);
+
+ oneway void sendFailure(String error);
+}
diff --git a/aidl/android/os/IStatsSubscriptionCallback.aidl b/aidl/android/os/IStatsSubscriptionCallback.aidl
new file mode 100644
index 0000000..abc2d35
--- /dev/null
+++ b/aidl/android/os/IStatsSubscriptionCallback.aidl
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.os;
+
+import android.os.StatsSubscriptionCallbackReason;
+
+/**
+ * Binder interface for a callback that provides data for a subscription.
+ * {@hide}
+ */
+oneway interface IStatsSubscriptionCallback {
+ /**
+ * Send back subscription data.
+ */
+ void onSubscriptionData(StatsSubscriptionCallbackReason reason, in byte[] subscriptionPayload);
+}
diff --git a/aidl/android/os/IStatsd.aidl b/aidl/android/os/IStatsd.aidl
index 2a0fe69..2cbaf9d 100644
--- a/aidl/android/os/IStatsd.aidl
+++ b/aidl/android/os/IStatsd.aidl
@@ -16,10 +16,12 @@
package android.os;
+import android.os.IStatsSubscriptionCallback;
import android.os.IPendingIntentRef;
import android.os.IPullAtomCallback;
import android.os.ParcelFileDescriptor;
import android.util.PropertyParcel;
+import android.os.IStatsQueryCallback;
/**
* Binder interface to communicate with the statistics management service.
@@ -240,4 +242,64 @@
* Notifies of properties in statsd_java namespace.
*/
oneway void updateProperties(in PropertyParcel[] properties);
+
+ /** Section for restricted-logging methods. */
+ /**
+     * Queries data from the underlying statsd SQL store.
+ */
+ oneway void querySql(in String sqlQuery, in int minSqlClientVersion,
+ in @nullable byte[] policyConfig, in IStatsQueryCallback queryCallback,
+ in long configKey, in String configPackage, in int callingUid);
+
+ /**
+     * Registers the operation that is called whenever there is a change in the restricted metrics
+     * present for this client for a specified config. This operation allows statsd to
+     * inform the client about the current restricted metrics available to be queried for the
+     * specified config.
+ *
+ * Requires Manifest.permission.READ_RESTRICTED_STATS
+ */
+ long[] setRestrictedMetricsChangedOperation(in long configKey, in String configPackage,
+ in IPendingIntentRef pir, int callingUid);
+
+ /**
+ * Removes the restricted metrics changed operation for the specified config package/id.
+ *
+ * Requires Manifest.permission.READ_RESTRICTED_STATS.
+ */
+ void removeRestrictedMetricsChangedOperation(in long configKey, in String configPackage,
+ in int callingUid);
+
+ /** Section for atoms subscription methods. */
+ /**
+ * Adds a subscription for atom events.
+ *
+     * The IStatsSubscriptionCallback Binder interface will be used to deliver subscription data
+     * back to the subscriber. IStatsSubscriptionCallback also uniquely identifies this
+     * subscription; it should not be reused for another subscription.
+ *
+ * Enforces caller is in the traced_probes selinux domain.
+ */
+ oneway void addSubscription(in byte[] subscriptionConfig,
+ IStatsSubscriptionCallback callback);
+
+ /**
+ * Unsubscribe from a given subscription identified by the IBinder token.
+ *
+ * This will subsequently trigger IStatsSubscriptionCallback with pending data
+ * for this subscription.
+ *
+ * Enforces caller is in the traced_probes selinux domain.
+ */
+ oneway void removeSubscription(IStatsSubscriptionCallback callback);
+
+ /**
+ * Flush data for a subscription.
+ *
+ * This will subsequently trigger IStatsSubscriptionCallback with pending data
+ * for this subscription.
+ *
+ * Enforces caller is in the traced_probes selinux domain.
+ */
+ oneway void flushSubscription(IStatsSubscriptionCallback callback);
}
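
The subscription methods above are driven through the generated IStatsd and
IStatsSubscriptionCallback binder stubs. A minimal sketch of the expected call pattern follows,
assuming the caller already holds an IStatsd binder, runs in the traced_probes SELinux domain as
required above, and has a wire-encoded subscription config; the enum parameter is shown as an
int, per its int backing type.

import android.os.IStatsSubscriptionCallback;
import android.os.IStatsd;
import android.os.RemoteException;

/** Sketch only; not part of this change. */
class SubscriptionExample {
    static void subscribe(IStatsd statsd, byte[] subscriptionConfig) throws RemoteException {
        IStatsSubscriptionCallback callback = new IStatsSubscriptionCallback.Stub() {
            @Override
            public void onSubscriptionData(int reason, byte[] subscriptionPayload) {
                // reason is one of the StatsSubscriptionCallbackReason values:
                // STATSD_INITIATED, FLUSH_REQUESTED or SUBSCRIPTION_ENDED.
            }
        };
        statsd.addSubscription(subscriptionConfig, callback); // callback identifies the subscription
        statsd.flushSubscription(callback);                   // request pending data now
        statsd.removeSubscription(callback);                  // end it; remaining data is delivered
    }
}
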
diff --git a/aidl/android/os/StatsSubscriptionCallbackReason.aidl b/aidl/android/os/StatsSubscriptionCallbackReason.aidl
new file mode 100644
index 0000000..5e3a945
--- /dev/null
+++ b/aidl/android/os/StatsSubscriptionCallbackReason.aidl
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.os;
+
+
+/**
+ * @hide
+ */
+@Backing(type="int")
+enum StatsSubscriptionCallbackReason {
+ STATSD_INITIATED = 1,
+ FLUSH_REQUESTED = 2,
+ SUBSCRIPTION_ENDED = 3,
+}
diff --git a/apex/Android.bp b/apex/Android.bp
index 2988a1d..0bad0ac 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -69,9 +69,13 @@
// ==========================================================
sdk {
name: "statsd-module-sdk",
- bootclasspath_fragments: ["com.android.os.statsd-bootclasspath-fragment"],
- systemserverclasspath_fragments: ["com.android.os.statsd-systemserverclasspath-fragment"],
+ apexes: [
+ // Adds exportable dependencies of the APEX to the sdk,
+ // e.g. *classpath_fragments.
+ "com.android.os.statsd",
+ ],
native_shared_libs: [
+ "libstatspull",
"libstatssocket",
],
}
diff --git a/apex/apex_manifest.json b/apex/apex_manifest.json
index 0fb3227..a8ea55c 100644
--- a/apex/apex_manifest.json
+++ b/apex/apex_manifest.json
@@ -1,5 +1,8 @@
{
"name": "com.android.os.statsd",
- "version": 339990000
+
+ // Placeholder module version to be replaced during build.
+ // Do not change!
+ "version": 0
}
diff --git a/apex/tests/libstatspull/src/com/android/internal/os/statsd/libstats/LibStatsPullTests.java b/apex/tests/libstatspull/src/com/android/internal/os/statsd/libstats/LibStatsPullTests.java
index 6108a32..028652b 100644
--- a/apex/tests/libstatspull/src/com/android/internal/os/statsd/libstats/LibStatsPullTests.java
+++ b/apex/tests/libstatspull/src/com/android/internal/os/statsd/libstats/LibStatsPullTests.java
@@ -248,6 +248,32 @@
}
}
+ /**
+     * Tests that puller registration/unregistration is processed in order
+ */
+ @Test
+ public void testPullAtomCallbacksProcessedInOrder() throws Exception {
+ StatsManager statsManager = mContext.getSystemService(StatsManager.class);
+ // Upload a config that captures that pulled atom.
+ createAndAddConfigToStatsd(statsManager);
+ for(int i = 0; i < 20; i++) {
+            // Adding & removing the puller back to back to detect any potential
+            // thread scheduling dependencies
+ setStatsPuller(PULL_ATOM_TAG, sPullTimeoutMillis, sCoolDownMillis, sPullReturnValue,
+ sPullLatencyMillis, sAtomsPerPull);
+ clearStatsPuller(PULL_ATOM_TAG);
+ }
+        // With out-of-order registration there is a chance that the puller callback is still
+        // registered; this is wrong, and this test verifies that it is not.
+ Thread.sleep(SHORT_SLEEP_MILLIS);
+ StatsLog.logStart(APP_BREADCRUMB_LABEL);
+
+ // Let the current bucket finish.
+ Thread.sleep(LONG_SLEEP_MILLIS);
+ List<Atom> data = StatsConfigUtils.getGaugeMetricDataList(statsManager, sConfigId);
+ assertThat(data.size()).isEqualTo(0);
+ }
+
private void createAndAddConfigToStatsd(StatsManager statsManager) throws Exception {
sConfigId = System.currentTimeMillis();
long triggerMatcherId = sConfigId + 10;
diff --git a/framework/Android.bp b/framework/Android.bp
index daf93cb..360801b 100644
--- a/framework/Android.bp
+++ b/framework/Android.bp
@@ -38,26 +38,34 @@
filegroup {
name: "framework-statsd-sources",
+ defaults: ["framework-sources-module-defaults"],
srcs: [
"java/**/*.java",
":framework-statsd-aidl-sources",
":statslog-statsd-java-gen",
],
- visibility: [
- "//frameworks/base", // For the "global" stubs.
- "//frameworks/base/apex/statsd:__subpackages__",
- "//packages/modules/StatsD/framework:__subpackages__",
- ],
+ visibility: ["//packages/modules/StatsD/framework:__subpackages__"],
}
+
java_sdk_library {
name: "framework-statsd",
defaults: ["framework-module-defaults"],
installable: true,
+ jarjar_rules: "jarjar-rules.txt",
+
srcs: [
":framework-statsd-sources",
],
+ libs: [
+ "androidx.annotation_annotation",
+ ],
+
+ static_libs: [
+ "modules-utils-build",
+ ],
+
permitted_packages: [
"android.app",
"android.os",
@@ -91,6 +99,15 @@
min_sdk_version: "30",
}
+java_api_contribution {
+ name: "framework-statsd-public-stubs",
+ api_surface: "public",
+ api_file: "api/current.txt",
+ visibility: [
+ "//build/orchestrator/apis",
+ ],
+}
+
// JNI library for StatsLog.write
cc_library_shared {
name: "libstats_jni",
diff --git a/framework/api/system-current.txt b/framework/api/system-current.txt
index c432a7f..41b5891 100644
--- a/framework/api/system-current.txt
+++ b/framework/api/system-current.txt
@@ -1,6 +1,18 @@
// Signature format: 2.0
package android.app {
+ public class StatsCursor extends android.database.AbstractCursor {
+ method @NonNull public String[] getColumnNames();
+ method public int getCount();
+ method public double getDouble(int);
+ method public float getFloat(int);
+ method public int getInt(int);
+ method public long getLong(int);
+ method public short getShort(int);
+ method @NonNull public String getString(int);
+ method public boolean isNull(int);
+ }
+
public final class StatsManager {
method @RequiresPermission(allOf={android.Manifest.permission.DUMP, android.Manifest.permission.PACKAGE_USAGE_STATS}) public void addConfig(long, byte[]) throws android.app.StatsManager.StatsUnavailableException;
method @Deprecated @RequiresPermission(allOf={android.Manifest.permission.DUMP, android.Manifest.permission.PACKAGE_USAGE_STATS}) public boolean addConfiguration(long, byte[]);
@@ -10,6 +22,7 @@
method @RequiresPermission(allOf={android.Manifest.permission.DUMP, android.Manifest.permission.PACKAGE_USAGE_STATS}) public long[] getRegisteredExperimentIds() throws android.app.StatsManager.StatsUnavailableException;
method @RequiresPermission(allOf={android.Manifest.permission.DUMP, android.Manifest.permission.PACKAGE_USAGE_STATS}) public byte[] getReports(long) throws android.app.StatsManager.StatsUnavailableException;
method @RequiresPermission(allOf={android.Manifest.permission.DUMP, android.Manifest.permission.PACKAGE_USAGE_STATS}) public byte[] getStatsMetadata() throws android.app.StatsManager.StatsUnavailableException;
+ method @RequiresPermission(android.Manifest.permission.READ_RESTRICTED_STATS) public void query(long, @NonNull String, @NonNull android.app.StatsQuery, @NonNull java.util.concurrent.Executor, @NonNull android.os.OutcomeReceiver<android.app.StatsCursor,android.app.StatsManager.StatsQueryException>) throws android.app.StatsManager.StatsUnavailableException;
method @RequiresPermission(allOf={android.Manifest.permission.DUMP, android.Manifest.permission.PACKAGE_USAGE_STATS}) public void removeConfig(long) throws android.app.StatsManager.StatsUnavailableException;
method @Deprecated @RequiresPermission(allOf={android.Manifest.permission.DUMP, android.Manifest.permission.PACKAGE_USAGE_STATS}) public boolean removeConfiguration(long);
method @NonNull @RequiresPermission(allOf={android.Manifest.permission.DUMP, android.Manifest.permission.PACKAGE_USAGE_STATS}) public long[] setActiveConfigsChangedOperation(@Nullable android.app.PendingIntent) throws android.app.StatsManager.StatsUnavailableException;
@@ -18,12 +31,14 @@
method @Deprecated @RequiresPermission(allOf={android.Manifest.permission.DUMP, android.Manifest.permission.PACKAGE_USAGE_STATS}) public boolean setDataFetchOperation(long, android.app.PendingIntent);
method @RequiresPermission(allOf={android.Manifest.permission.DUMP, android.Manifest.permission.PACKAGE_USAGE_STATS}) public void setFetchReportsOperation(android.app.PendingIntent, long) throws android.app.StatsManager.StatsUnavailableException;
method @RequiresPermission(android.Manifest.permission.REGISTER_STATS_PULL_ATOM) public void setPullAtomCallback(int, @Nullable android.app.StatsManager.PullAtomMetadata, @NonNull java.util.concurrent.Executor, @NonNull android.app.StatsManager.StatsPullAtomCallback);
+ method @NonNull @RequiresPermission(android.Manifest.permission.READ_RESTRICTED_STATS) public long[] setRestrictedMetricsChangedOperation(long, @NonNull String, @Nullable android.app.PendingIntent) throws android.app.StatsManager.StatsUnavailableException;
field public static final String ACTION_STATSD_STARTED = "android.app.action.STATSD_STARTED";
field public static final String EXTRA_STATS_ACTIVE_CONFIG_KEYS = "android.app.extra.STATS_ACTIVE_CONFIG_KEYS";
field public static final String EXTRA_STATS_BROADCAST_SUBSCRIBER_COOKIES = "android.app.extra.STATS_BROADCAST_SUBSCRIBER_COOKIES";
field public static final String EXTRA_STATS_CONFIG_KEY = "android.app.extra.STATS_CONFIG_KEY";
field public static final String EXTRA_STATS_CONFIG_UID = "android.app.extra.STATS_CONFIG_UID";
field public static final String EXTRA_STATS_DIMENSIONS_VALUE = "android.app.extra.STATS_DIMENSIONS_VALUE";
+ field public static final String EXTRA_STATS_RESTRICTED_METRIC_IDS = "android.app.extra.STATS_RESTRICTED_METRIC_IDS";
field public static final String EXTRA_STATS_SUBSCRIPTION_ID = "android.app.extra.STATS_SUBSCRIPTION_ID";
field public static final String EXTRA_STATS_SUBSCRIPTION_RULE_ID = "android.app.extra.STATS_SUBSCRIPTION_RULE_ID";
field public static final int PULL_SKIP = 1; // 0x1
@@ -48,11 +63,33 @@
method public int onPullAtom(int, @NonNull java.util.List<android.util.StatsEvent>);
}
+ public static class StatsManager.StatsQueryException extends android.util.AndroidException {
+ ctor public StatsManager.StatsQueryException(@NonNull String);
+ ctor public StatsManager.StatsQueryException(@NonNull String, @NonNull Throwable);
+ }
+
public static class StatsManager.StatsUnavailableException extends android.util.AndroidException {
ctor public StatsManager.StatsUnavailableException(String);
ctor public StatsManager.StatsUnavailableException(String, Throwable);
}
+ public final class StatsQuery {
+ method @IntRange(from=0) public int getMinSqlClientVersion();
+ method @Nullable public byte[] getPolicyConfig();
+ method @NonNull public String getRawSql();
+ method public int getSqlDialect();
+ field public static final int DIALECT_SQLITE = 1; // 0x1
+ field public static final int DIALECT_UNKNOWN = 0; // 0x0
+ }
+
+ public static final class StatsQuery.Builder {
+ ctor public StatsQuery.Builder(@NonNull String);
+ method @NonNull public android.app.StatsQuery build();
+ method @NonNull public android.app.StatsQuery.Builder setMinSqlClientVersion(@IntRange(from=0) int);
+ method @NonNull public android.app.StatsQuery.Builder setPolicyConfig(@NonNull byte[]);
+ method @NonNull public android.app.StatsQuery.Builder setSqlDialect(int);
+ }
+
}
package android.os {
@@ -112,12 +149,26 @@
method @Deprecated public static void writeRaw(@NonNull byte[], int);
field public static final byte ANNOTATION_ID_DEFAULT_STATE = 6; // 0x6
field public static final byte ANNOTATION_ID_EXCLUSIVE_STATE = 4; // 0x4
+ field public static final byte ANNOTATION_ID_FIELD_RESTRICTION_ACCESSIBILITY = 14; // 0xe
+ field public static final byte ANNOTATION_ID_FIELD_RESTRICTION_AMBIENT_SENSING = 17; // 0x11
+ field public static final byte ANNOTATION_ID_FIELD_RESTRICTION_APP_ACTIVITY = 12; // 0xc
+ field public static final byte ANNOTATION_ID_FIELD_RESTRICTION_APP_USAGE = 11; // 0xb
+ field public static final byte ANNOTATION_ID_FIELD_RESTRICTION_DEMOGRAPHIC_CLASSIFICATION = 18; // 0x12
+ field public static final byte ANNOTATION_ID_FIELD_RESTRICTION_HEALTH_CONNECT = 13; // 0xd
+ field public static final byte ANNOTATION_ID_FIELD_RESTRICTION_PERIPHERAL_DEVICE_INFO = 10; // 0xa
+ field public static final byte ANNOTATION_ID_FIELD_RESTRICTION_SYSTEM_SEARCH = 15; // 0xf
+ field public static final byte ANNOTATION_ID_FIELD_RESTRICTION_USER_ENGAGEMENT = 16; // 0x10
field public static final byte ANNOTATION_ID_IS_UID = 1; // 0x1
field public static final byte ANNOTATION_ID_PRIMARY_FIELD = 3; // 0x3
field public static final byte ANNOTATION_ID_PRIMARY_FIELD_FIRST_UID = 5; // 0x5
+ field public static final byte ANNOTATION_ID_RESTRICTION_CATEGORY = 9; // 0x9
field public static final byte ANNOTATION_ID_STATE_NESTED = 8; // 0x8
field public static final byte ANNOTATION_ID_TRIGGER_STATE_RESET = 7; // 0x7
field public static final byte ANNOTATION_ID_TRUNCATE_TIMESTAMP = 2; // 0x2
+ field public static final int RESTRICTION_CATEGORY_AUTHENTICATION = 3; // 0x3
+ field public static final int RESTRICTION_CATEGORY_DIAGNOSTIC = 1; // 0x1
+ field public static final int RESTRICTION_CATEGORY_FRAUD_AND_ABUSE = 4; // 0x4
+ field public static final int RESTRICTION_CATEGORY_SYSTEM_INTELLIGENCE = 2; // 0x2
}
}
diff --git a/framework/jarjar-rules.txt b/framework/jarjar-rules.txt
new file mode 100644
index 0000000..c78153d
--- /dev/null
+++ b/framework/jarjar-rules.txt
@@ -0,0 +1 @@
+rule com.android.modules.utils.** com.android.internal.statsd.@0
\ No newline at end of file
diff --git a/framework/java/android/app/StatsCursor.java b/framework/java/android/app/StatsCursor.java
new file mode 100644
index 0000000..29cd241
--- /dev/null
+++ b/framework/java/android/app/StatsCursor.java
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.app;
+
+import android.annotation.NonNull;
+import android.annotation.SystemApi;
+import android.annotation.SuppressLint;
+import android.database.AbstractCursor;
+import android.database.MatrixCursor;
+
+/**
+ * Custom cursor implementation to hold a cross-process cursor to pass data to the caller.
+ *
+ * @hide
+ */
+@SystemApi
+public class StatsCursor extends AbstractCursor {
+ private final MatrixCursor mMatrixCursor;
+ private final int[] mColumnTypes;
+ private final String[] mColumnNames;
+ private final int mRowCount;
+
+ /**
+ * @hide
+ **/
+ public StatsCursor(String[] queryData, String[] columnNames, int[] columnTypes, int rowCount) {
+ mColumnTypes = columnTypes;
+ mColumnNames = columnNames;
+ mRowCount = rowCount;
+ mMatrixCursor = new MatrixCursor(columnNames);
+ for (int i = 0; i < rowCount; i++) {
+ MatrixCursor.RowBuilder builder = mMatrixCursor.newRow();
+ for (int j = 0; j < columnNames.length; j++) {
+ int dataIndex = i * columnNames.length + j;
+ builder.add(columnNames[j], queryData[dataIndex]);
+ }
+ }
+ }
+
+ /**
+     * Returns the number of rows in the cursor.
+ *
+ * @return the number of rows in the cursor.
+ */
+ @Override
+ public int getCount() {
+ return mRowCount;
+ }
+
+ /**
+ * Returns a string array holding the names of all of the columns in the
+ * result set in the order in which they were listed in the result.
+ *
+ * @return the names of the columns returned in this query.
+ */
+ @Override
+ @NonNull
+ public String[] getColumnNames() {
+ return mColumnNames;
+ }
+
+ /**
+ * Returns the value of the requested column as a String.
+ *
+ * @param column the zero-based index of the target column.
+ * @return the value of that column as a String.
+ */
+ @Override
+ @NonNull
+ public String getString(int column) {
+ return mMatrixCursor.getString(column);
+ }
+
+ /**
+ * Returns the value of the requested column as a short.
+ *
+ * @param column the zero-based index of the target column.
+ * @return the value of that column as a short.
+ */
+ @Override
+ @SuppressLint("NoByteOrShort")
+ public short getShort(int column) {
+ return mMatrixCursor.getShort(column);
+ }
+
+ /**
+ * Returns the value of the requested column as an int.
+ *
+ * @param column the zero-based index of the target column.
+ * @return the value of that column as an int.
+ */
+ @Override
+ public int getInt(int column) {
+ return mMatrixCursor.getInt(column);
+ }
+
+ /**
+ * Returns the value of the requested column as a long.
+ *
+ * @param column the zero-based index of the target column.
+ * @return the value of that column as a long.
+ */
+ @Override
+ public long getLong(int column) {
+ return mMatrixCursor.getLong(column);
+ }
+
+ /**
+ * Returns the value of the requested column as a float.
+ *
+ * @param column the zero-based index of the target column.
+ * @return the value of that column as a float.
+ */
+ @Override
+ public float getFloat(int column) {
+ return mMatrixCursor.getFloat(column);
+ }
+
+ /**
+ * Returns the value of the requested column as a double.
+ *
+ * @param column the zero-based index of the target column.
+ * @return the value of that column as a double.
+ */
+ @Override
+ public double getDouble(int column) {
+ return mMatrixCursor.getDouble(column);
+ }
+
+ /**
+ * Returns <code>true</code> if the value in the indicated column is null.
+ *
+ * @param column the zero-based index of the target column.
+ * @return whether the column value is null.
+ */
+ @Override
+ public boolean isNull(int column) {
+ return mMatrixCursor.isNull(column);
+ }
+
+ /**
+ * Returns the data type of the given column's value.
+ *
+ * @param column the zero-based index of the target column.
+ * @return column value type
+ */
+ @Override
+ public int getType(int column) {
+ return mColumnTypes[column];
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public boolean onMove(int oldPosition, int newPosition) {
+ return mMatrixCursor.moveToPosition(newPosition);
+ }
+}
diff --git a/framework/java/android/app/StatsManager.java b/framework/java/android/app/StatsManager.java
index 92eb416..a0379fd 100644
--- a/framework/java/android/app/StatsManager.java
+++ b/framework/java/android/app/StatsManager.java
@@ -17,6 +17,7 @@
import static android.Manifest.permission.DUMP;
import static android.Manifest.permission.PACKAGE_USAGE_STATS;
+import static android.Manifest.permission.READ_RESTRICTED_STATS;
import android.annotation.CallbackExecutor;
import android.annotation.NonNull;
@@ -25,9 +26,12 @@
import android.annotation.SystemApi;
import android.content.Context;
import android.os.Binder;
+import android.os.Build;
import android.os.IPullAtomCallback;
import android.os.IPullAtomResultReceiver;
import android.os.IStatsManagerService;
+import android.os.IStatsQueryCallback;
+import android.os.OutcomeReceiver;
import android.os.RemoteException;
import android.os.StatsFrameworkInitializer;
import android.util.AndroidException;
@@ -35,8 +39,11 @@
import android.util.StatsEvent;
import android.util.StatsEventParcel;
+import androidx.annotation.RequiresApi;
+
import com.android.internal.annotations.GuardedBy;
import com.android.internal.annotations.VisibleForTesting;
+import com.android.modules.utils.build.SdkLevel;
import java.util.ArrayList;
import java.util.List;
@@ -95,6 +102,12 @@
"android.app.extra.STATS_ACTIVE_CONFIG_KEYS";
/**
+ * Long array extra of the restricted metric ids present for the client.
+ */
+ public static final String EXTRA_STATS_RESTRICTED_METRIC_IDS =
+ "android.app.extra.STATS_RESTRICTED_METRIC_IDS";
+
+ /**
* Broadcast Action: Statsd has started.
* Configurations and PendingIntents can now be sent to it.
*/
@@ -120,7 +133,7 @@
/**
* @hide
**/
- @VisibleForTesting public static final long DEFAULT_TIMEOUT_MILLIS = 2_000L; // 2 seconds.
+ @VisibleForTesting public static final long DEFAULT_TIMEOUT_MILLIS = 1_500L; // 1.5 seconds.
/**
* Constructor for StatsManagerClient.
@@ -363,6 +376,115 @@
}
}
+ /**
+     * Registers the operation that is called whenever there is a change in the restricted metrics
+     * present for this client for a specified config. This operation allows statsd to
+     * inform the client about the current restricted metric ids available to be queried for the
+     * specified config. This call can block on statsd.
+ *
+ * If there is no config in statsd that matches the provided config package and key, an empty
+ * list is returned. The pending intent will be tracked, and the operation will be called
+ * whenever a matching config is added.
+ *
+ * @param configKey The configKey passed by the package that added the config in
+ * StatsManager#addConfig
+ * @param configPackage The package that added the config in StatsManager#addConfig
+ * @param pendingIntent the PendingIntent to use when broadcasting info to caller.
+ * May be null, in which case it removes any associated pending intent
+ * for this client.
+ * @return A list of metric ids identifying the restricted metrics that are currently available
+ * to be queried for the specified config.
+ * If the pendingIntent is null, this will be an empty list.
+ * @throws StatsUnavailableException if unsuccessful due to failing to connect to stats service
+ */
+ @RequiresPermission(READ_RESTRICTED_STATS)
+ @RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
+ public @NonNull long[] setRestrictedMetricsChangedOperation(long configKey,
+ @NonNull String configPackage,
+ @Nullable PendingIntent pendingIntent)
+ throws StatsUnavailableException {
+ synchronized (sLock) {
+ try {
+ IStatsManagerService service = getIStatsManagerServiceLocked();
+ if (pendingIntent == null) {
+ service.removeRestrictedMetricsChangedOperation(configKey, configPackage);
+ return new long[0];
+ } else {
+ return service.setRestrictedMetricsChangedOperation(pendingIntent,
+ configKey, configPackage);
+ }
+
+ } catch (RemoteException e) {
+ Log.e(TAG, "Failed to connect to statsmanager "
+ + "when registering restricted metrics listener.");
+ throw new StatsUnavailableException("could not connect", e);
+ } catch (SecurityException e) {
+ throw new StatsUnavailableException(e.getMessage(), e);
+ }
+ }
+ }
+
+ /**
+     * Queries the underlying service based on the query received and populates the OutcomeReceiver
+     * via callback. This call blocks until statsd is available, but is otherwise non-blocking,
+     * i.e. the call can return before the query processing is done.
+ * <p>
+ * Two types of tables are supported: Metric tables and the device information table.
+ * </p>
+ * <p>
+ * The device information table is named device_info and contains the following columns:
+ * sdkVersion, model, product, hardware, device, osBuild, fingerprint, brand, manufacturer, and
+ * board. These columns correspond to {@link Build.VERSION.SDK_INT}, {@link Build.MODEL},
+ * {@link Build.PRODUCT}, {@link Build.HARDWARE}, {@link Build.DEVICE}, {@link Build.ID},
+ * {@link Build.FINGERPRINT}, {@link Build.BRAND}, {@link Build.MANUFACTURER},
+ * {@link Build.BOARD} respectively.
+ * </p>
+ * <p>
+ * The metric tables are named metric_METRIC_ID where METRIC_ID is the metric id that is part
+ * of the wire encoded config passed to {@link #addConfig(long, byte[])}. If the metric id is
+ * negative, then the '-' character is replaced with 'n' in the table name. Each metric table
+     * contains 3 fixed columns followed by n columns of the following form: atomId,
+     * elapsedTimestampNs, wallTimestampNs, field_1, field_2, field_3 ... field_n. These
+     * columns correspond to the id of the atom from frameworks/proto_logging/stats/atoms.proto,
+     * the time when the atom is recorded, and the data fields within each atom.
+ * </p>
+ * @param configKey The configKey passed by the package that added
+ * the config being queried in StatsManager#addConfig
+ * @param configPackage The package that added the config being queried in
+ * StatsManager#addConfig
+ * @param query the query object encapsulating a sql-string and necessary config to query
+ * underlying sql-based data store.
+ * @param executor the executor on which outcomeReceiver will be invoked.
+     * @param outcomeReceiver the receiver to be populated with a cursor pointing to the result data.
+ */
+ @RequiresPermission(READ_RESTRICTED_STATS)
+ @RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
+ public void query(long configKey, @NonNull String configPackage, @NonNull StatsQuery query,
+ @NonNull @CallbackExecutor Executor executor,
+ @NonNull OutcomeReceiver<StatsCursor, StatsQueryException> outcomeReceiver)
+ throws StatsUnavailableException {
+ if(query.getSqlDialect() != StatsQuery.DIALECT_SQLITE) {
+ executor.execute(() -> {
+ outcomeReceiver.onError(new StatsQueryException("Unsupported Sql Dialect"));
+ });
+ return;
+ }
+
+ StatsQueryCallbackInternal callbackInternal =
+ new StatsQueryCallbackInternal(outcomeReceiver, executor);
+ synchronized (sLock) {
+ try {
+ IStatsManagerService service = getIStatsManagerServiceLocked();
+ service.querySql(query.getRawSql(), query.getMinSqlClientVersion(),
+ query.getPolicyConfig(), callbackInternal, configKey,
+ configPackage);
+ } catch (RemoteException | IllegalStateException e) {
+ throw new StatsUnavailableException("could not connect", e);
+ }
+ }
+ }
+
+
// TODO: Temporary for backwards compatibility. Remove.
/**
* @deprecated Use {@link #setFetchReportsOperation(PendingIntent, long)}
@@ -721,6 +843,52 @@
return mStatsManagerService;
}
+ private static class StatsQueryCallbackInternal extends IStatsQueryCallback.Stub {
+ OutcomeReceiver<StatsCursor, StatsQueryException> queryCallback;
+ Executor mExecutor;
+
+ StatsQueryCallbackInternal(OutcomeReceiver<StatsCursor, StatsQueryException> queryCallback,
+ @NonNull @CallbackExecutor Executor executor) {
+ this.queryCallback = queryCallback;
+ this.mExecutor = executor;
+ }
+
+ @Override
+ public void sendResults(String[] queryData, String[] columnNames, int[] columnTypes,
+ int rowCount) {
+ if (!SdkLevel.isAtLeastU()) {
+ throw new IllegalStateException(
+ "StatsManager#query is not available before Android U");
+ }
+ final long token = Binder.clearCallingIdentity();
+ try {
+ mExecutor.execute(() -> {
+ StatsCursor cursor = new StatsCursor(queryData, columnNames, columnTypes,
+ rowCount);
+ queryCallback.onResult(cursor);
+ });
+ } finally {
+ Binder.restoreCallingIdentity(token);
+ }
+ }
+
+ @Override
+ public void sendFailure(String error) {
+ if (!SdkLevel.isAtLeastU()) {
+ throw new IllegalStateException(
+ "StatsManager#query is not available before Android U");
+ }
+ final long token = Binder.clearCallingIdentity();
+ try {
+ mExecutor.execute(() -> {
+ queryCallback.onError(new StatsQueryException(error));
+ });
+ } finally {
+ Binder.restoreCallingIdentity(token);
+ }
+ }
+ }
+
/**
* Exception thrown when communication with the stats service fails (eg if it is not available).
* This might be thrown early during boot before the stats service has started or if it crashed.
@@ -734,4 +902,18 @@
super("Failed to connect to statsd: " + reason, e);
}
}
+
+ /**
+ * Exception thrown when executing a query in statsd fails for any reason. This might be thrown
+ * if the query is malformed or if there is a database error when executing the query.
+ */
+ public static class StatsQueryException extends AndroidException {
+ public StatsQueryException(@NonNull String reason) {
+ super("Failed to query statsd: " + reason);
+ }
+
+ public StatsQueryException(@NonNull String reason, @NonNull Throwable e) {
+ super("Failed to query statsd: " + reason, e);
+ }
+ }
}
diff --git a/framework/java/android/app/StatsQuery.java b/framework/java/android/app/StatsQuery.java
new file mode 100644
index 0000000..a4a315c
--- /dev/null
+++ b/framework/java/android/app/StatsQuery.java
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.app;
+
+import android.annotation.IntDef;
+import android.annotation.IntRange;
+import android.annotation.NonNull;
+import android.annotation.Nullable;
+import android.annotation.SystemApi;
+
+/**
+ * Represents a query that contains information required for StatsManager to return relevant metric
+ * data.
+ *
+ * @hide
+ */
+@SystemApi
+public final class StatsQuery {
+ /**
+ * Default value for SQL dialect.
+ */
+ public static final int DIALECT_UNKNOWN = 0;
+
+ /**
+ * Query passed is of SQLite dialect.
+ */
+ public static final int DIALECT_SQLITE = 1;
+
+ /**
+ * @hide
+ */
+ @IntDef(prefix = {"DIALECT_"}, value = {DIALECT_UNKNOWN, DIALECT_SQLITE})
+ @interface SqlDialect {
+ }
+
+ private final int sqlDialect;
+ private final String rawSql;
+ private final int minClientSqlVersion;
+ private final byte[] policyConfig;
+ private StatsQuery(int sqlDialect, @NonNull String rawSql, int minClientSqlVersion,
+ @Nullable byte[] policyConfig) {
+ this.sqlDialect = sqlDialect;
+ this.rawSql = rawSql;
+ this.minClientSqlVersion = minClientSqlVersion;
+ this.policyConfig = policyConfig;
+ }
+
+ /**
+ * Returns the SQL dialect of the query.
+ */
+ public @SqlDialect int getSqlDialect() {
+ return sqlDialect;
+ }
+
+ /**
+ * Returns the raw SQL of the query.
+ */
+ @NonNull
+ public String getRawSql() {
+ return rawSql;
+ }
+
+ /**
+ * Returns the minimum SQL client library version required to execute the query.
+ */
+ @IntRange(from = 0)
+ public int getMinSqlClientVersion() {
+ return minClientSqlVersion;
+ }
+
+ /**
+ * Returns the wire-encoded StatsPolicyConfig proto that contains information to verify the
+ * query against a policy defined on the underlying data. Returns null if no policy was set.
+ */
+ @Nullable
+ public byte[] getPolicyConfig() {
+ return policyConfig;
+ }
+
+ /**
+ * Builder for constructing a StatsQuery object.
+ * <p>Usage:</p>
+ * <code>
+ * StatsQuery statsQuery = new StatsQuery.Builder("SELECT * from table")
+ * .setSqlDialect(StatsQuery.DIALECT_SQLITE)
+ *   .setMinSqlClientVersion(1)
+ * .build();
+ * </code>
+ */
+ public static final class Builder {
+ private int sqlDialect;
+ private String rawSql;
+ private int minSqlClientVersion;
+ private byte[] policyConfig;
+
+ /**
+ * Returns a new StatsQuery.Builder object for constructing StatsQuery for
+ * StatsManager#query
+ */
+ public Builder(@NonNull final String rawSql) {
+ if (rawSql == null) {
+ throw new IllegalArgumentException("rawSql must not be null");
+ }
+ this.rawSql = rawSql;
+ this.sqlDialect = DIALECT_SQLITE;
+ this.minSqlClientVersion = 1;
+ this.policyConfig = null;
+ }
+
+ /**
+ * Sets the SQL dialect of the query.
+ *
+ * @param sqlDialect The SQL dialect of the query.
+ */
+ @NonNull
+ public Builder setSqlDialect(@SqlDialect final int sqlDialect) {
+ this.sqlDialect = sqlDialect;
+ return this;
+ }
+
+ /**
+ * Sets the minimum SQL client library version required to execute the query.
+ *
+ * @param minSqlClientVersion The minimum SQL client version required to execute the query.
+ */
+ @NonNull
+ public Builder setMinSqlClientVersion(@IntRange(from = 0) final int minSqlClientVersion) {
+ if (minSqlClientVersion < 0) {
+ throw new IllegalArgumentException("minSqlClientVersion must be a "
+                        + "non-negative integer");
+ }
+ this.minSqlClientVersion = minSqlClientVersion;
+ return this;
+ }
+
+ /**
+ * Sets the wire-encoded StatsPolicyConfig proto that contains information to verify the
+ * query against a policy defined on the underlying data.
+ *
+ * @param policyConfig The wire-encoded StatsPolicyConfig proto.
+ */
+ @NonNull
+ public Builder setPolicyConfig(@NonNull final byte[] policyConfig) {
+ this.policyConfig = policyConfig;
+ return this;
+ }
+
+ /**
+ * Builds a new instance of {@link StatsQuery}.
+ *
+ * @return A new instance of {@link StatsQuery}.
+ */
+ @NonNull
+ public StatsQuery build() {
+ return new StatsQuery(sqlDialect, rawSql, minSqlClientVersion, policyConfig);
+ }
+ }
+}
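
Taken together with StatsManager#query and StatsCursor above, the builder added here is intended
to be used roughly as sketched below. This is a hedged illustration, not part of this change: the
config key, config package and the metric id in the table name are hypothetical, and the table
naming follows the metric_METRIC_ID convention described in the query() javadoc.

import android.app.StatsCursor;
import android.app.StatsManager;
import android.app.StatsQuery;
import android.content.Context;
import android.os.OutcomeReceiver;

/** Sketch only; not part of this change. */
class RestrictedQueryExample {
    // Hypothetical values identifying a config previously added via StatsManager#addConfig.
    private static final long CONFIG_KEY = 1234L;
    private static final String CONFIG_PACKAGE = "com.example.configowner";

    static void runQuery(Context context) throws StatsManager.StatsUnavailableException {
        StatsManager statsManager = context.getSystemService(StatsManager.class);
        StatsQuery query = new StatsQuery.Builder("SELECT * FROM metric_12345678") // hypothetical id
                .setSqlDialect(StatsQuery.DIALECT_SQLITE)
                .setMinSqlClientVersion(1)
                .build();
        statsManager.query(CONFIG_KEY, CONFIG_PACKAGE, query, context.getMainExecutor(),
                new OutcomeReceiver<StatsCursor, StatsManager.StatsQueryException>() {
                    @Override
                    public void onResult(StatsCursor cursor) {
                        while (cursor.moveToNext()) {
                            // Column 0 is whatever the SELECT returned first, e.g. atomId.
                            String first = cursor.getString(0);
                        }
                    }

                    @Override
                    public void onError(StatsManager.StatsQueryException e) {
                        // Malformed query or a database error inside statsd.
                    }
                });
    }
}
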
diff --git a/framework/java/android/util/StatsEvent.java b/framework/java/android/util/StatsEvent.java
index 51676f9..221226d 100644
--- a/framework/java/android/util/StatsEvent.java
+++ b/framework/java/android/util/StatsEvent.java
@@ -870,7 +870,7 @@
private void release() {
// Recycle this Buffer if its size is MAX_PUSH_PAYLOAD_SIZE or under.
- if (mBytes.length <= MAX_PUSH_PAYLOAD_SIZE) {
+ if (mMaxSize <= MAX_PUSH_PAYLOAD_SIZE) {
synchronized (sLock) {
if (null == sPool) {
sPool = this;
diff --git a/framework/java/android/util/StatsLog.java b/framework/java/android/util/StatsLog.java
index 28884c1..f38751a 100644
--- a/framework/java/android/util/StatsLog.java
+++ b/framework/java/android/util/StatsLog.java
@@ -20,17 +20,24 @@
import static android.Manifest.permission.PACKAGE_USAGE_STATS;
import android.Manifest;
+import android.annotation.IntDef;
import android.annotation.NonNull;
import android.annotation.RequiresPermission;
import android.annotation.SuppressLint;
import android.annotation.SystemApi;
import android.content.Context;
+import android.os.Build;
import android.os.IStatsd;
import android.os.Process;
import android.util.proto.ProtoOutputStream;
+import androidx.annotation.RequiresApi;
+
import com.android.internal.statsd.StatsdStatsLog;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+
/**
* StatsLog provides an API for developers to send events to statsd. The events can be used to
* define custom metrics inside statsd.
@@ -46,80 +53,290 @@
private static final int EXPERIMENT_IDS_FIELD_ID = 1;
/**
- * Annotation ID constant for logging UID field.
- *
- * @hide
- */
+ * Annotation ID constant for logging UID field.
+ *
+ * The ID is a byte since StatsEvent.addBooleanAnnotation() and StatsEvent.addIntAnnotation()
+ * accept byte as the type for annotation ids to save space.
+ *
+ * @hide
+ */
@SuppressLint("NoByteOrShort")
@SystemApi
public static final byte ANNOTATION_ID_IS_UID = 1;
/**
- * Annotation ID constant to indicate logged atom event's timestamp should be truncated.
- *
- * @hide
- */
+ * Annotation ID constant to indicate logged atom event's timestamp should be truncated.
+ *
+ * The ID is a byte since StatsEvent.addBooleanAnnotation() and StatsEvent.addIntAnnotation()
+ * accept byte as the type for annotation ids to save space.
+ *
+ * @hide
+ */
@SuppressLint("NoByteOrShort")
@SystemApi
public static final byte ANNOTATION_ID_TRUNCATE_TIMESTAMP = 2;
/**
- * Annotation ID constant for a state atom's primary field.
- *
- * @hide
- */
+ * Annotation ID constant for a state atom's primary field.
+ *
+ * The ID is a byte since StatsEvent.addBooleanAnnotation() and StatsEvent.addIntAnnotation()
+ * accept byte as the type for annotation ids to save space.
+ *
+ * @hide
+ */
@SuppressLint("NoByteOrShort")
@SystemApi
public static final byte ANNOTATION_ID_PRIMARY_FIELD = 3;
/**
- * Annotation ID constant for state atom's state field.
- *
- * @hide
- */
+ * Annotation ID constant for state atom's state field.
+ *
+ * The ID is a byte since StatsEvent.addBooleanAnnotation() and StatsEvent.addIntAnnotation()
+ * accept byte as the type for annotation ids to save space.
+ *
+ * @hide
+ */
@SuppressLint("NoByteOrShort")
@SystemApi
public static final byte ANNOTATION_ID_EXCLUSIVE_STATE = 4;
/**
- * Annotation ID constant to indicate the first UID in the attribution chain
- * is a primary field.
- * Should only be used for attribution chain fields.
- *
- * @hide
- */
+ * Annotation ID constant to indicate the first UID in the attribution chain
+ * is a primary field.
+ * Should only be used for attribution chain fields.
+ *
+ * The ID is a byte since StatsEvent.addBooleanAnnotation() and StatsEvent.addIntAnnotation()
+ * accept byte as the type for annotation ids to save space.
+ *
+ * @hide
+ */
@SuppressLint("NoByteOrShort")
@SystemApi
public static final byte ANNOTATION_ID_PRIMARY_FIELD_FIRST_UID = 5;
/**
- * Annotation ID constant to indicate which state is default for the state atom.
- *
- * @hide
- */
+ * Annotation ID constant to indicate which state is default for the state atom.
+ *
+ * The ID is a byte since StatsEvent.addBooleanAnnotation() and StatsEvent.addIntAnnotation()
+ * accept byte as the type for annotation ids to save space.
+ *
+ * @hide
+ */
@SuppressLint("NoByteOrShort")
@SystemApi
public static final byte ANNOTATION_ID_DEFAULT_STATE = 6;
/**
- * Annotation ID constant to signal all states should be reset to the default state.
- *
- * @hide
- */
+ * Annotation ID constant to signal all states should be reset to the default state.
+ *
+ * The ID is a byte since StatsEvent.addBooleanAnnotation() and StatsEvent.addIntAnnotation()
+ * accept byte as the type for annotation ids to save space.
+ *
+ * @hide
+ */
@SuppressLint("NoByteOrShort")
@SystemApi
public static final byte ANNOTATION_ID_TRIGGER_STATE_RESET = 7;
/**
- * Annotation ID constant to indicate state changes need to account for nesting.
- * This should only be used with binary state atoms.
- *
- * @hide
- */
+ * Annotation ID constant to indicate state changes need to account for nesting.
+ * This should only be used with binary state atoms.
+ *
+ * The ID is a byte since StatsEvent.addBooleanAnnotation() and StatsEvent.addIntAnnotation()
+ * accept byte as the type for annotation ids to save space.
+ *
+ * @hide
+ */
@SuppressLint("NoByteOrShort")
@SystemApi
public static final byte ANNOTATION_ID_STATE_NESTED = 8;
+ /**
+ * Annotation ID constant to indicate the restriction category of an atom.
+ * This annotation must only be attached to the atom id. This is an int annotation.
+ *
+ * The ID is a byte since StatsEvent.addBooleanAnnotation() and StatsEvent.addIntAnnotation()
+ * accept byte as the type for annotation ids to save space.
+ *
+ * @hide
+ */
+ @SuppressLint("NoByteOrShort")
+ @SystemApi
+ @RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
+ public static final byte ANNOTATION_ID_RESTRICTION_CATEGORY = 9;
+
+ /**
+ * Annotation ID to indicate that a field of an atom contains peripheral device info.
+ * This is a bool annotation.
+ *
+ * The ID is a byte since StatsEvent.addBooleanAnnotation() and StatsEvent.addIntAnnotation()
+ * accept byte as the type for annotation ids to save space.
+ *
+ * @hide
+ */
+ @SuppressLint("NoByteOrShort")
+ @SystemApi
+ @RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
+ public static final byte ANNOTATION_ID_FIELD_RESTRICTION_PERIPHERAL_DEVICE_INFO = 10;
+
+ /**
+ * Annotation ID to indicate that a field of an atom contains app usage information.
+ * This is a bool annotation.
+ *
+ * The ID is a byte since StatsEvent.addBooleanAnnotation() and StatsEvent.addIntAnnotation()
+ * accept byte as the type for annotation ids to save space.
+ *
+ * @hide
+ */
+ @SuppressLint("NoByteOrShort")
+ @SystemApi
+ @RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
+ public static final byte ANNOTATION_ID_FIELD_RESTRICTION_APP_USAGE = 11;
+
+ /**
+ * Annotation ID to indicate that a field of an atom contains app activity information.
+ * This is a bool annotation.
+ *
+ * The ID is a byte since StatsEvent.addBooleanAnnotation() and StatsEvent.addIntAnnotation()
+ * accept byte as the type for annotation ids to save space.
+ *
+ * @hide
+ */
+ @SuppressLint("NoByteOrShort")
+ @SystemApi
+ @RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
+ public static final byte ANNOTATION_ID_FIELD_RESTRICTION_APP_ACTIVITY = 12;
+
+ /**
+ * Annotation ID to indicate that a field of an atom contains health connect information.
+ * This is a bool annotation.
+ *
+ * The ID is a byte since StatsEvent.addBooleanAnnotation() and StatsEvent.addIntAnnotation()
+ * accept byte as the type for annotation ids to save space.
+ *
+ * @hide
+ */
+ @SuppressLint("NoByteOrShort")
+ @SystemApi
+ @RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
+ public static final byte ANNOTATION_ID_FIELD_RESTRICTION_HEALTH_CONNECT = 13;
+
+ /**
+ * Annotation ID to indicate that a field of an atom contains accessibility information.
+ * This is a bool annotation.
+ *
+ * The ID is a byte since StatsEvent.addBooleanAnnotation() and StatsEvent.addIntAnnotation()
+ * accept byte as the type for annotation ids to save space.
+ *
+ * @hide
+ */
+ @SuppressLint("NoByteOrShort")
+ @SystemApi
+ @RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
+ public static final byte ANNOTATION_ID_FIELD_RESTRICTION_ACCESSIBILITY = 14;
+
+ /**
+ * Annotation ID to indicate that a field of an atom contains system search information.
+ * This is a bool annotation.
+ *
+ * The ID is a byte since StatsEvent.addBooleanAnnotation() and StatsEvent.addIntAnnotation()
+ * accept byte as the type for annotation ids to save space.
+ *
+ * @hide
+ */
+ @SuppressLint("NoByteOrShort")
+ @SystemApi
+ @RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
+ public static final byte ANNOTATION_ID_FIELD_RESTRICTION_SYSTEM_SEARCH = 15;
+
+ /**
+ * Annotation ID to indicate that a field of an atom contains user engagement information.
+ * This is a bool annotation.
+ *
+ * The ID is a byte since StatsEvent.addBooleanAnnotation() and StatsEvent.addIntAnnotation()
+ * accept byte as the type for annotation ids to save space.
+ *
+ * @hide
+ */
+ @SuppressLint("NoByteOrShort")
+ @SystemApi
+ @RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
+ public static final byte ANNOTATION_ID_FIELD_RESTRICTION_USER_ENGAGEMENT = 16;
+
+ /**
+ * Annotation ID to indicate that a field of an atom contains ambient sensing information.
+ * This is a bool annotation.
+ *
+ * The ID is a byte since StatsEvent.addBooleanAnnotation() and StatsEvent.addIntAnnotation()
+ * accept byte as the type for annotation ids to save space.
+ *
+ * @hide
+ */
+ @SuppressLint("NoByteOrShort")
+ @SystemApi
+ @RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
+ public static final byte ANNOTATION_ID_FIELD_RESTRICTION_AMBIENT_SENSING = 17;
+
+ /**
+ * Annotation ID to indicate that a field of an atom contains demographic classification
+ * information. This is a bool annotation.
+ *
+ * The ID is a byte since StatsEvent.addBooleanAnnotation() and StatsEvent.addIntAnnotation()
+ * accept byte as the type for annotation ids to save space.
+ *
+ * @hide
+ */
+ @SuppressLint("NoByteOrShort")
+ @SystemApi
+ @RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
+ public static final byte ANNOTATION_ID_FIELD_RESTRICTION_DEMOGRAPHIC_CLASSIFICATION = 18;
+
+
+ /** @hide */
+ @IntDef(prefix = { "RESTRICTION_CATEGORY_" }, value = {
+ RESTRICTION_CATEGORY_DIAGNOSTIC,
+ RESTRICTION_CATEGORY_SYSTEM_INTELLIGENCE,
+ RESTRICTION_CATEGORY_AUTHENTICATION,
+ RESTRICTION_CATEGORY_FRAUD_AND_ABUSE})
+ @Retention(RetentionPolicy.SOURCE)
+ public @interface RestrictionCategory {}
+
+ /**
+ * Restriction category for atoms about diagnostics.
+ *
+ * @hide
+ */
+ @SystemApi
+ @RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
+ public static final int RESTRICTION_CATEGORY_DIAGNOSTIC = 1;
+
+ /**
+ * Restriction category for atoms about system intelligence.
+ *
+ * @hide
+ */
+ @SystemApi
+ @RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
+ public static final int RESTRICTION_CATEGORY_SYSTEM_INTELLIGENCE = 2;
+
+ /**
+ * Restriction category for atoms about authentication.
+ *
+ * @hide
+ */
+ @SystemApi
+ @RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
+ public static final int RESTRICTION_CATEGORY_AUTHENTICATION = 3;
+
+ /**
+ * Restriction category for atoms about fraud and abuse.
+ *
+ * @hide
+ */
+ @SystemApi
+ @RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
+ public static final int RESTRICTION_CATEGORY_FRAUD_AND_ABUSE = 4;
+
private StatsLog() {
}
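
A short sketch of how the new restriction annotations are expected to be attached when building a
StatsEvent, assuming the existing StatsEvent.Builder annotation APIs are used unchanged; the atom
id and field value below are hypothetical, not part of this change.

import android.util.StatsEvent;
import android.util.StatsLog;

/** Sketch only; not part of this change. */
class RestrictedAtomExample {
    // Hypothetical restricted atom id.
    private static final int MY_RESTRICTED_ATOM_ID = 100001;

    static void logRestrictedAtom(String fieldValue) {
        StatsEvent event = StatsEvent.newBuilder()
                .setAtomId(MY_RESTRICTED_ATOM_ID)
                // Atom-level int annotation: attached to the atom id, as required above.
                .addIntAnnotation(StatsLog.ANNOTATION_ID_RESTRICTION_CATEGORY,
                        StatsLog.RESTRICTION_CATEGORY_DIAGNOSTIC)
                .writeString(fieldValue)
                // Field-level bool annotation: marks the preceding field as app-usage data.
                .addBooleanAnnotation(StatsLog.ANNOTATION_ID_FIELD_RESTRICTION_APP_USAGE, true)
                .usePooledBuffer()
                .build();
        StatsLog.write(event);
    }
}
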
diff --git a/framework/test/unittests/AndroidTest.xml b/framework/test/unittests/AndroidTest.xml
index e32f16e..4ab7b64 100644
--- a/framework/test/unittests/AndroidTest.xml
+++ b/framework/test/unittests/AndroidTest.xml
@@ -14,7 +14,7 @@
limitations under the License.
-->
<configuration description="Runs Tests for Statsd.">
- <target_preparer class="com.android.tradefed.targetprep.TestAppInstallSetup">
+ <target_preparer class="com.android.tradefed.targetprep.suite.SuiteApkInstaller">
<option name="test-file-name" value="FrameworkStatsdTest.apk" />
<option name="install-arg" value="-g" />
</target_preparer>
diff --git a/lib/libkll/Android.bp b/lib/libkll/Android.bp
index a901101..5f05fb7 100644
--- a/lib/libkll/Android.bp
+++ b/lib/libkll/Android.bp
@@ -23,6 +23,9 @@
cc_library {
name: "libkll",
+ host_supported: true,
+ vendor_available: true,
+ double_loadable: true,
srcs: [
"compactor_stack.cpp",
"kll.cpp",
@@ -33,6 +36,7 @@
"libkll-protos",
],
shared_libs: [
+ "liblog",
"libprotobuf-cpp-lite",
],
cflags: [
@@ -51,6 +55,10 @@
cc_test {
name: "libkll_test",
+ host_supported: true,
+ tidy_timeout_srcs: [
+ "tests/sampler_test.cpp",
+ ],
srcs: [
"tests/**/*.cpp",
],
@@ -61,6 +69,7 @@
"libkll-protos",
],
shared_libs: [
+ "liblog",
"libprotobuf-cpp-lite",
],
test_suites: ["general-tests"],
diff --git a/lib/libkll/compactor_stack.cpp b/lib/libkll/compactor_stack.cpp
index 9ffb044..0262d00 100644
--- a/lib/libkll/compactor_stack.cpp
+++ b/lib/libkll/compactor_stack.cpp
@@ -15,6 +15,10 @@
*/
#include "compactor_stack.h"
+#define LOG_TAG "libkll"
+
+#include <log/log.h>
+
#include <vector>
#include "random_generator.h"
@@ -114,6 +118,7 @@
}
void CompactorStack::CompactStack() {
+ int initial_num_items_in_compactors = num_items_in_compactors_;
while (num_items_in_compactors_ >= overall_capacity_) {
for (size_t i = 0; i < compactors_.size(); i++) {
if (!compactors_[i].empty() &&
@@ -124,6 +129,20 @@
}
}
}
+ // TODO(b/237694338): Remove the temporary infinite loop detection code
+ if (num_items_in_compactors_ >= initial_num_items_in_compactors) {
+ // The loop above didn't do anything in terms of reducing the number of items.
+ // To prevent an infinite loop, crash now.
+ ALOGI("num_items_in_compactors_=%d, compactors_.size()=%zu, overall_capacity_=%d",
+ num_items_in_compactors_, compactors_.size(), overall_capacity_);
+ for (size_t i = 0; i < compactors_.size(); i++) {
+ const std::vector<int64_t>& compactor = compactors_[i];
+ ALOGI("compactors_[%zu].size()=%zu, TargetCapacityAtLevel(i)=%d", i,
+ compactor.size(), TargetCapacityAtLevel(i));
+ }
+ LOG_ALWAYS_FATAL("Detected infinite loop in %s ", __func__);
+ }
+ initial_num_items_in_compactors = num_items_in_compactors_;
}
}
diff --git a/lib/libkll/encoding/Android.bp b/lib/libkll/encoding/Android.bp
index e151de2..4d3813c 100644
--- a/lib/libkll/encoding/Android.bp
+++ b/lib/libkll/encoding/Android.bp
@@ -23,6 +23,9 @@
cc_library_static {
name: "libkll-encoder",
+ host_supported: true,
+ vendor_available: true,
+ double_loadable: true,
srcs: [
"encoder.cpp",
"varint.cpp",
diff --git a/lib/libkll/proto/Android.bp b/lib/libkll/proto/Android.bp
index e2f6ef7..ec85a7e 100644
--- a/lib/libkll/proto/Android.bp
+++ b/lib/libkll/proto/Android.bp
@@ -23,6 +23,9 @@
cc_library_static {
name: "libkll-protos",
+ host_supported: true,
+ vendor_available: true,
+ double_loadable: true,
srcs: ["*.proto"],
proto: {
export_proto_headers: true,
diff --git a/lib/libstatsgtestmatchers/Android.bp b/lib/libstatsgtestmatchers/Android.bp
new file mode 100644
index 0000000..fe832fa
--- /dev/null
+++ b/lib/libstatsgtestmatchers/Android.bp
@@ -0,0 +1,53 @@
+//
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// ==========================================================
+// Native library for custom GoogleTest matchers
+// ==========================================================
+package {
+ default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+cc_test_library {
+ name: "libstatsgtestmatchers",
+ srcs: [
+ ":libstats_log_protos",
+ ":libstats_subscription_protos",
+ ],
+ export_include_dirs: ["include"],
+ proto: {
+ type: "lite",
+ include_dirs: [
+ "external/protobuf/src",
+ ],
+ static: true,
+ },
+ static_libs: [
+ "libgmock",
+ "libprotobuf-cpp-lite",
+ ],
+ shared: {
+ enabled: false,
+ },
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ],
+ visibility: [
+ "//packages/modules/StatsD/lib/libstatspull",
+ "//packages/modules/StatsD/statsd",
+ ],
+}
diff --git a/lib/libstatsgtestmatchers/include/gtest_matchers.h b/lib/libstatsgtestmatchers/include/gtest_matchers.h
new file mode 100644
index 0000000..084346d
--- /dev/null
+++ b/lib/libstatsgtestmatchers/include/gtest_matchers.h
@@ -0,0 +1,231 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include <ostream>
+
+#include "frameworks/proto_logging/stats/atoms.pb.h"
+#include "frameworks/proto_logging/stats/attribution_node.pb.h"
+#include "packages/modules/StatsD/statsd/src/shell/shell_data.pb.h"
+#include "packages/modules/StatsD/statsd/src/stats_log.pb.h"
+
+namespace android {
+namespace os {
+namespace statsd {
+
+using PackageInfo = UidMapping_PackageInfoSnapshot_PackageInfo;
+
+// clang-format off
+
+#define PROPERTY_MATCHER(typename, propname, matcher) \
+ testing::Property(#propname, &typename::propname, matcher(expected.propname()))
+
+#define REPEATED_PROPERTY_MATCHER(typename, propname, matcher) \
+ testing::Property(#propname, &typename::propname, \
+ testing::Pointwise(matcher(), expected.propname()))
+
+#define PROPERTY_EQ(typename, propname) PROPERTY_MATCHER(typename, propname, testing::Eq)
+
+#define REPEATED_PROPERTY_EQ(typename, propname) \
+ REPEATED_PROPERTY_MATCHER(typename, propname, testing::Eq)
+
+/**
+ * Generates a GoogleTest equality matcher for a custom type specified by typename with the given
+ * properties.
+ *
+ * Example: For the protos:
+ * message Bar {
+ * optional int32 aa = 1;
+ * }
+ * message Foo {
+ * optional int32 a = 1;
+ * repeated float b = 2;
+ * optional Bar bar = 3;
+ * repeated Bar repeated_bar = 4;
+ * }
+ * This will generate equality matchers for them called EqBar() and EqFoo():
+ * EQ_MATCHER(Bar, PROPERTY_EQ(Bar, aa));
+ * EQ_MATCHER(Foo,
+ * PROPERTY_EQ(Foo, a),
+ * PROPERTY_EQ(Foo, b),
+ * PROPERTY_MATCHER(Foo, bar, EqBar),
+ * REPEATED_PROPERTY_MATCHER(Foo, repeated_bar, EqBar)
+ * );
+ */
+#define EQ_MATCHER(typename, properties...) \
+ MATCHER(Eq##typename, " ") { \
+ return testing::Matches(Eq##typename(std::get<1>(arg)))(std::get<0>(arg)); \
+ } \
+ MATCHER_P(Eq##typename, expected, testing::PrintToString(expected)) { \
+ return testing::ExplainMatchResult(testing::AllOf(properties), arg, result_listener); \
+ }
+
+#define PROPERTY_PRINT(propname) \
+ if (obj.has_##propname()) { \
+ *os << #propname << ": " << testing::PrintToString(obj.propname()) << ", "; \
+ }
+
+#define REPEATED_PROPERTY_PRINT(propname) \
+ if (obj.propname##_size() > 0) { \
+ *os << #propname << ": " << testing::PrintToString(obj.propname()) << ", "; \
+ }
+
+/**
+ * Generates a method that teaches GoogleTest how to print a custom type specified by typename
+ * with the given properties.
+ * The equality matcher generated by EQ_MATCHER for the given typename will use this generated
+ * printer method to print the object when the matcher fails.
+ *
+ * Example: For the protos:
+ * message Bar {
+ * optional int32 aa = 1;
+ * }
+ * message Foo {
+ * optional int32 a = 1;
+ * repeated float b = 2;
+ * optional Bar bar = 3;
+ * repeated Bar repeated_bar = 4;
+ * }
+ * This will generate printer methods for them:
+ * TYPE_PRINTER(Bar, PROPERTY_PRINT(aa));
+ * TYPE_PRINTER(Foo,
+ * PROPERTY_PRINT(a)
+ * PROPERTY_PRINT(b)
+ * PROPERTY_PRINT(bar)
+ * REPEATED_PROPERTY_PRINT(repeated_bar)
+ * );
+ */
+#define TYPE_PRINTER(typename, properties) \
+ inline void PrintTo(const typename& obj, std::ostream* os) { \
+ *os << #typename << ": { "; \
+ properties \
+ *os << "}"; \
+ }
+
+EQ_MATCHER(PackageInfo,
+ PROPERTY_EQ(PackageInfo, version),
+ PROPERTY_EQ(PackageInfo, uid),
+ PROPERTY_EQ(PackageInfo, deleted),
+ PROPERTY_EQ(PackageInfo, truncated_certificate_hash),
+ PROPERTY_EQ(PackageInfo, name_hash),
+ PROPERTY_EQ(PackageInfo, version_string_hash),
+ PROPERTY_EQ(PackageInfo, name),
+ PROPERTY_EQ(PackageInfo, version_string),
+ PROPERTY_EQ(PackageInfo, installer_index),
+ PROPERTY_EQ(PackageInfo, installer_hash),
+ PROPERTY_EQ(PackageInfo, installer)
+);
+TYPE_PRINTER(PackageInfo,
+ PROPERTY_PRINT(version)
+ PROPERTY_PRINT(uid)
+ PROPERTY_PRINT(deleted)
+ PROPERTY_PRINT(truncated_certificate_hash)
+ PROPERTY_PRINT(name_hash)
+ PROPERTY_PRINT(version_string_hash)
+ PROPERTY_PRINT(name)
+ PROPERTY_PRINT(version_string)
+ PROPERTY_PRINT(installer_index)
+ PROPERTY_PRINT(installer_hash)
+ PROPERTY_PRINT(installer)
+);
+
+EQ_MATCHER(AttributionNode,
+ PROPERTY_EQ(AttributionNode, uid),
+ PROPERTY_EQ(AttributionNode, tag)
+);
+TYPE_PRINTER(AttributionNode,
+ PROPERTY_PRINT(uid)
+ PROPERTY_PRINT(tag)
+);
+
+EQ_MATCHER(ScreenStateChanged, PROPERTY_EQ(ScreenStateChanged, state));
+TYPE_PRINTER(ScreenStateChanged, PROPERTY_PRINT(state));
+
+EQ_MATCHER(TrainExperimentIds,
+ REPEATED_PROPERTY_EQ(TrainExperimentIds, experiment_id)
+);
+TYPE_PRINTER(TrainExperimentIds, REPEATED_PROPERTY_PRINT(experiment_id));
+
+EQ_MATCHER(TestAtomReported,
+ REPEATED_PROPERTY_MATCHER(TestAtomReported, attribution_node, EqAttributionNode),
+ PROPERTY_EQ(TestAtomReported, int_field),
+ PROPERTY_EQ(TestAtomReported, long_field),
+ PROPERTY_EQ(TestAtomReported, float_field),
+ PROPERTY_EQ(TestAtomReported, string_field),
+ PROPERTY_EQ(TestAtomReported, boolean_field),
+ PROPERTY_EQ(TestAtomReported, state),
+ PROPERTY_MATCHER(TestAtomReported, bytes_field, EqTrainExperimentIds),
+ REPEATED_PROPERTY_EQ(TestAtomReported, repeated_int_field),
+ REPEATED_PROPERTY_EQ(TestAtomReported, repeated_long_field),
+ REPEATED_PROPERTY_EQ(TestAtomReported, repeated_float_field),
+ REPEATED_PROPERTY_EQ(TestAtomReported, repeated_string_field),
+ REPEATED_PROPERTY_EQ(TestAtomReported, repeated_boolean_field),
+ REPEATED_PROPERTY_EQ(TestAtomReported, repeated_enum_field)
+);
+TYPE_PRINTER(TestAtomReported,
+ REPEATED_PROPERTY_PRINT(attribution_node)
+ PROPERTY_PRINT(int_field)
+ PROPERTY_PRINT(long_field)
+ PROPERTY_PRINT(float_field)
+ PROPERTY_PRINT(string_field)
+ PROPERTY_PRINT(boolean_field)
+ PROPERTY_PRINT(state)
+ PROPERTY_PRINT(bytes_field)
+ REPEATED_PROPERTY_PRINT(repeated_int_field)
+ REPEATED_PROPERTY_PRINT(repeated_long_field)
+ REPEATED_PROPERTY_PRINT(repeated_float_field)
+ REPEATED_PROPERTY_PRINT(repeated_string_field)
+ REPEATED_PROPERTY_PRINT(repeated_boolean_field)
+ REPEATED_PROPERTY_PRINT(repeated_enum_field)
+);
+
+EQ_MATCHER(CpuActiveTime,
+ PROPERTY_EQ(CpuActiveTime, uid),
+ PROPERTY_EQ(CpuActiveTime, time_millis)
+);
+TYPE_PRINTER(CpuActiveTime,
+ PROPERTY_PRINT(uid)
+ PROPERTY_PRINT(time_millis)
+);
+
+EQ_MATCHER(PluggedStateChanged, PROPERTY_EQ(PluggedStateChanged, state));
+TYPE_PRINTER(PluggedStateChanged, PROPERTY_PRINT(state));
+
+EQ_MATCHER(Atom,
+ PROPERTY_MATCHER(Atom, screen_state_changed, EqScreenStateChanged),
+ PROPERTY_MATCHER(Atom, test_atom_reported, EqTestAtomReported)
+);
+TYPE_PRINTER(Atom,
+ PROPERTY_PRINT(screen_state_changed)
+ PROPERTY_PRINT(test_atom_reported)
+);
+
+EQ_MATCHER(ShellData,
+ REPEATED_PROPERTY_MATCHER(ShellData, atom, EqAtom),
+ REPEATED_PROPERTY_EQ(ShellData, elapsed_timestamp_nanos)
+);
+TYPE_PRINTER(ShellData,
+ REPEATED_PROPERTY_PRINT(atom)
+ REPEATED_PROPERTY_PRINT(elapsed_timestamp_nanos)
+);
+
+// clang-format on
+
+} // namespace statsd
+} // namespace os
+} // namespace android
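For orientation, a minimal usage sketch of the generated matchers (editorial illustration, not part of this change; the PackageInfo field values below are hypothetical):

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <gtest_matchers.h>

#include <vector>

namespace android {
namespace os {
namespace statsd {

TEST(GtestMatchersExample, ComparesPackageInfos) {
    PackageInfo expected;
    expected.set_uid(10001);  // hypothetical values
    expected.set_version(42);
    expected.set_version_string("1.2.3");

    PackageInfo actual = expected;

    // EqPackageInfo(expected) is generated by EQ_MATCHER(PackageInfo, ...) above; on a
    // mismatch, the PrintTo() generated by TYPE_PRINTER prints both protos.
    EXPECT_THAT(actual, EqPackageInfo(expected));

    // The parameterless overload is intended for element-wise comparison of repeated fields.
    std::vector<PackageInfo> actualList = {actual};
    std::vector<PackageInfo> expectedList = {expected};
    EXPECT_THAT(actualList, testing::Pointwise(EqPackageInfo(), expectedList));
}

}  // namespace statsd
}  // namespace os
}  // namespace android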
diff --git a/lib/libstatspull/Android.bp b/lib/libstatspull/Android.bp
index c0bc46d..99c3e54 100644
--- a/lib/libstatspull/Android.bp
+++ b/lib/libstatspull/Android.bp
@@ -24,6 +24,8 @@
cc_defaults {
name: "libstatspull_defaults",
srcs: [
+ "stats_subscription.cpp",
+ "stats_provider.cpp",
"stats_pull_atom_callback.cpp",
],
cflags: [
@@ -48,6 +50,7 @@
},
},
}
+
cc_library {
name: "libstatspull",
defaults: [
@@ -83,11 +86,6 @@
min_sdk_version: "30",
stl: "libc++_static",
-
- // TODO(b/151102177): Enable it when the build error is fixed.
- header_abi_checker: {
- enabled: false,
- },
}
cc_library_headers {
@@ -109,21 +107,45 @@
],
}
-// Note: These unit tests only test PullAtomMetadata.
-// For full E2E tests of libstatspull, use LibStatsPullTests
+// Note: These unit tests only test PullAtomMetadata and subscriptions.
+// For full E2E tests of pullers, use LibStatsPullTests
cc_test {
name: "libstatspull_test",
srcs: [
+ ":libprotobuf-internal-descriptor-proto",
+ ":libstats_log_protos",
+ ":libstats_subscription_protos",
"tests/pull_atom_metadata_test.cpp",
+ "tests/stats_subscription_test.cpp",
],
+ proto: {
+ type: "lite",
+ include_dirs: [
+ "external/protobuf/src",
+ ],
+ static: true,
+ },
shared_libs: [
"libstatspull",
"libstatssocket",
+ "libbase",
+ "libbinder",
+ "libutils",
+ "liblog",
],
- test_suites: ["general-tests", "mts-statsd"],
+ static_libs: [
+ "libgmock",
+ "libstatsgtestmatchers",
+ "libstatslog_statsdtest",
+ "libprotobuf-cpp-lite",
+ ],
+ test_suites: [
+ "general-tests",
+ "mts-statsd",
+ ],
test_config: "libstatspull_test.xml",
- //TODO(b/153588990): Remove when the build system properly separates
+ //TODO(b/153588990): Remove when the build system properly separates
//32bit and 64bit architectures.
compile_multilib: "both",
multilib: {
@@ -141,6 +163,8 @@
"-Wno-unused-variable",
"-Wno-unused-function",
"-Wno-unused-parameter",
+ "-Wno-deprecated-declarations",
],
require_root: true,
+ min_sdk_version: "30",
}
diff --git a/lib/libstatspull/include/stats_subscription.h b/lib/libstatspull/include/stats_subscription.h
new file mode 100644
index 0000000..6509c2b
--- /dev/null
+++ b/lib/libstatspull/include/stats_subscription.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2023, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <stdint.h>
+#include <sys/cdefs.h>
+
+#ifndef __STATSD_SUBS_MIN_API__
+#define __STATSD_SUBS_MIN_API__ __ANDROID_API_U__
+#endif
+
+__BEGIN_DECLS
+
+/**
+ * Reason codes for why the subscription callback was triggered.
+ */
+typedef enum AStatsManager_SubscriptionCallbackReason : uint32_t {
+ /**
+ * SubscriptionCallbackReason constant for subscription data transfer initiated by stats
+ * service.
+ *
+ * Introduced in API 34.
+ */
+ ASTATSMANAGER_SUBSCRIPTION_CALLBACK_REASON_STATSD_INITIATED = 1,
+
+ /**
+ * SubscriptionCallbackReason constant for subscriber requesting flush of pending data.
+ *
+ * Introduced in API 34.
+ */
+ ASTATSMANAGER_SUBSCRIPTION_CALLBACK_REASON_FLUSH_REQUESTED = 2,
+
+ /**
+ * SubscriptionCallbackReason constant for final stream of data for a subscription.
+ *
+ * Introduced in API 34.
+ */
+ ASTATSMANAGER_SUBSCRIPTION_CALLBACK_REASON_SUBSCRIPTION_ENDED = 3,
+} AStatsManager_SubscriptionCallbackReason;
+
+/**
+ * Callback interface for receiving subscription data from the stats service.
+ *
+ * This will be called on an arbitrary binder thread. There is a pool of such threads and there is
+ * no guarantee that a single thread will be used even for the same subscription. Clients must
+ * ensure it is safe to call the callback from arbitrary threads.
+ *
+ * \param subscription_id the subscription id for which the callback is triggered.
+ * \param reason code for why the callback is triggered.
+ * \param payload encoded SubscriptionResults proto containing subscription data.
+ * Cannot be null.
+ * \param num_bytes size in bytes of the payload.
+ * \param cookie the opaque pointer passed in AStatsManager_addSubscription. Can be null.
+ *
+ * Introduced in API 34.
+ */
+typedef void (*AStatsManager_SubscriptionCallback)(int32_t subscription_id,
+ AStatsManager_SubscriptionCallbackReason reason,
+ uint8_t* _Nonnull payload, size_t num_bytes,
+ void* _Nullable cookie);
+
+/**
+ * Adds a new subscription.
+ *
+ * Requires that the caller is in the traced_probes selinux domain.
+ *
+ * \param subscription_config encoded ShellSubscription proto containing parameters for a new
+ * subscription. Cannot be null.
+ * \param num_bytes size in bytes of the subscription_config.
+ * \param callback function called to deliver subscription data back to the subscriber. Each
+ * callback can be used for more than one subscription. Cannot be null.
+ * \param cookie opaque pointer to associate with the subscription. The provided callback will be
+ * invoked with this cookie as an argument when delivering data for this subscription. Can be
+ * null.
+ * \return subscription ID for the new subscription. Subscription ID is a positive integer. A
+ * negative value indicates an error.
+ *
+ * Introduced in API 34.
+ */
+int32_t AStatsManager_addSubscription(const uint8_t* _Nonnull subscription_config, size_t num_bytes,
+ const AStatsManager_SubscriptionCallback _Nonnull callback,
+ void* _Nullable cookie)
+ __INTRODUCED_IN(__STATSD_SUBS_MIN_API__);
+
+/**
+ * Removes an existing subscription.
+ * This will trigger a flush of the remaining subscription data through
+ * AStatsManager_SubscriptionCallback with the reason as
+ * ASTATSMANAGER_SUBSCRIPTION_CALLBACK_REASON_SUBSCRIPTION_ENDED.
+ *
+ * Requires that the caller is in the traced_probes selinux domain.
+ *
+ * \param subscription_id subscription id of the subscription to terminate.
+ *
+ * Introduced in API 34.
+ */
+void AStatsManager_removeSubscription(int32_t subscription_id)
+ __INTRODUCED_IN(__STATSD_SUBS_MIN_API__);
+
+/**
+ * Requests the stats service to flush a subscription.
+ * This will trigger AStatsManager_SubscriptionCallback with the reason as
+ * ASTATSMANAGER_SUBSCRIPTION_CALLBACK_REASON_FLUSH_REQUESTED.
+ *
+ * Requires that the caller is in the traced_probes selinux domain.
+ *
+ * \param subscription_id ID of the subscription to be flushed.
+ *
+ * Introduced in API 34.
+ */
+void AStatsManager_flushSubscription(int32_t subscription_id)
+ __INTRODUCED_IN(__STATSD_SUBS_MIN_API__);
+
+__END_DECLS
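For orientation, a minimal client-side sketch of this API (editorial illustration, not part of this change); the callback body and cookie type are hypothetical, and configBytes is assumed to be an encoded ShellSubscription proto:

#include <stats_subscription.h>

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical callback: copies the encoded SubscriptionResults payload for later decoding.
static void OnSubscriptionData(int32_t subscription_id,
                               AStatsManager_SubscriptionCallbackReason reason,
                               uint8_t* _Nonnull payload, size_t num_bytes,
                               void* _Nullable cookie) {
    (void)subscription_id;
    (void)reason;
    auto* lastPayload = static_cast<std::vector<uint8_t>*>(cookie);
    if (lastPayload != nullptr) {
        lastPayload->assign(payload, payload + num_bytes);
    }
}

static void RunSubscription(const std::vector<uint8_t>& configBytes,
                            std::vector<uint8_t>* lastPayload) {
    if (__builtin_available(android __STATSD_SUBS_MIN_API__, *)) {
        const int32_t id = AStatsManager_addSubscription(configBytes.data(), configBytes.size(),
                                                         &OnSubscriptionData, lastPayload);
        if (id <= 0) {
            return;  // a negative value indicates an error
        }
        AStatsManager_flushSubscription(id);   // callback fires with FLUSH_REQUESTED
        AStatsManager_removeSubscription(id);  // final callback fires with SUBSCRIPTION_ENDED
    }
}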
diff --git a/lib/libstatspull/libstatspull.map.txt b/lib/libstatspull/libstatspull.map.txt
index e0e851a..30641e4 100644
--- a/lib/libstatspull/libstatspull.map.txt
+++ b/lib/libstatspull/libstatspull.map.txt
@@ -12,6 +12,10 @@
AStatsEventList_addStatsEvent; # apex # introduced=30
AStatsManager_setPullAtomCallback; # apex # introduced=30
AStatsManager_clearPullAtomCallback; # apex # introduced=30
+
+ AStatsManager_addSubscription; # apex # introduced=UpsideDownCake
+ AStatsManager_removeSubscription; # apex # introduced=UpsideDownCake
+ AStatsManager_flushSubscription; # apex # introduced=UpsideDownCake
local:
*;
};
diff --git a/lib/libstatspull/libstatspull_test.xml b/lib/libstatspull/libstatspull_test.xml
index 233fc1f..5b06f1e 100644
--- a/lib/libstatspull/libstatspull_test.xml
+++ b/lib/libstatspull/libstatspull_test.xml
@@ -29,6 +29,7 @@
<test class="com.android.tradefed.testtype.GTest" >
<option name="native-test-device-path" value="/data/local/tmp" />
<option name="module-name" value="libstatspull_test" />
+ <option name="native-test-timeout" value="180000"/>
</test>
<object type="module_controller" class="com.android.tradefed.testtype.suite.module.MainlineTestModuleController">
diff --git a/lib/libstatspull/stats_provider.cpp b/lib/libstatspull/stats_provider.cpp
new file mode 100644
index 0000000..bee8e89
--- /dev/null
+++ b/lib/libstatspull/stats_provider.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2023, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android/binder_manager.h>
+#include <stats_provider.h>
+
+using aidl::android::os::IStatsd;
+
+StatsProvider::StatsProvider(StatsProviderBinderDiedCallback callback)
+ : mDeathRecipient(AIBinder_DeathRecipient_new(binderDied)), mCallback(callback) {
+}
+
+StatsProvider::~StatsProvider() {
+ resetStatsService();
+}
+
+std::shared_ptr<IStatsd> StatsProvider::getStatsService() {
+ std::lock_guard<std::mutex> lock(mMutex);
+ if (!mStatsd) {
+ // Fetch statsd
+ ::ndk::SpAIBinder binder(AServiceManager_getService("stats"));
+ mStatsd = IStatsd::fromBinder(binder);
+ if (mStatsd) {
+ AIBinder_linkToDeath(binder.get(), mDeathRecipient.get(), this);
+ }
+ }
+ return mStatsd;
+}
+
+void StatsProvider::resetStatsService() {
+ std::lock_guard<std::mutex> lock(mMutex);
+ mStatsd = nullptr;
+}
+
+void StatsProvider::binderDied(void* cookie) {
+ StatsProvider* statsProvider = static_cast<StatsProvider*>(cookie);
+ statsProvider->resetStatsService();
+ statsProvider->mCallback();
+}
diff --git a/lib/libstatspull/stats_provider.h b/lib/libstatspull/stats_provider.h
new file mode 100644
index 0000000..c9d9246
--- /dev/null
+++ b/lib/libstatspull/stats_provider.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2023, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <aidl/android/os/IStatsd.h>
+#include <android/binder_auto_utils.h>
+
+using StatsProviderBinderDiedCallback = void (*)(void);
+
+/**
+ * Wrapper class for providing the IStatsd Binder service.
+ * It handles Binder death and invokes the registered callback so that clients can restore their
+ * state once the Binder service comes back after death.
+ */
+class StatsProvider {
+public:
+ StatsProvider(StatsProviderBinderDiedCallback callback);
+
+ ~StatsProvider();
+
+ std::shared_ptr<aidl::android::os::IStatsd> getStatsService();
+
+private:
+ static void binderDied(void* cookie);
+
+ void resetStatsService();
+
+ std::mutex mMutex;
+ std::shared_ptr<aidl::android::os::IStatsd> mStatsd;
+ const ::ndk::ScopedAIBinder_DeathRecipient mDeathRecipient;
+ const StatsProviderBinderDiedCallback mCallback;
+};
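For orientation, a minimal sketch of a client of this wrapper (editorial illustration, not part of this change); onStatsdBinderDied and the re-registration step are hypothetical — stats_subscription.cpp below is the actual in-tree user:

#include <memory>

#include <stats_provider.h>

// Hypothetical death handler: state would be re-registered with statsd from here.
static void onStatsdBinderDied() {
    // e.g. re-send subscriptions or pull-atom registrations once statsd is reachable again.
}

static std::shared_ptr<StatsProvider> gStatsProvider =
        std::make_shared<StatsProvider>(onStatsdBinderDied);

static bool callStatsd() {
    std::shared_ptr<aidl::android::os::IStatsd> statsd = gStatsProvider->getStatsService();
    if (statsd == nullptr) {
        return false;  // statsd not available; the caller decides whether to retry
    }
    // ... make Binder calls on statsd here ...
    return true;
}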
diff --git a/lib/libstatspull/stats_pull_atom_callback.cpp b/lib/libstatspull/stats_pull_atom_callback.cpp
index f85db08..3fdf243 100644
--- a/lib/libstatspull/stats_pull_atom_callback.cpp
+++ b/lib/libstatspull/stats_pull_atom_callback.cpp
@@ -14,13 +14,6 @@
* limitations under the License.
*/
-#include <map>
-#include <thread>
-#include <vector>
-
-#include <stats_event.h>
-#include <stats_pull_atom_callback.h>
-
#include <aidl/android/os/BnPullAtomCallback.h>
#include <aidl/android/os/IPullAtomResultReceiver.h>
#include <aidl/android/os/IStatsd.h>
@@ -28,6 +21,13 @@
#include <android/binder_auto_utils.h>
#include <android/binder_ibinder.h>
#include <android/binder_manager.h>
+#include <stats_event.h>
+#include <stats_pull_atom_callback.h>
+
+#include <map>
+#include <queue>
+#include <thread>
+#include <vector>
using Status = ::ndk::ScopedAStatus;
using aidl::android::os::BnPullAtomCallback;
@@ -47,7 +47,7 @@
}
constexpr int64_t DEFAULT_COOL_DOWN_MILLIS = 1000LL; // 1 second.
-constexpr int64_t DEFAULT_TIMEOUT_MILLIS = 2000LL; // 2 seconds.
+constexpr int64_t DEFAULT_TIMEOUT_MILLIS = 1500LL; // 1.5 seconds.
struct AStatsManager_PullAtomMetadata {
int64_t cool_down_millis;
@@ -159,20 +159,20 @@
};
/**
- * @brief pullAtomMutex is used to guard simultaneous access to mPullers from below threads
+ * @brief pullersMutex is used to guard simultaneous access to pullers from the threads below:
* Main thread
* - AStatsManager_setPullAtomCallback()
* - AStatsManager_clearPullAtomCallback()
* Binder thread:
* - StatsdProvider::binderDied()
*/
-static std::mutex pullAtomMutex;
+static std::mutex pullersMutex;
-static std::map<int32_t, std::shared_ptr<StatsPullAtomCallbackInternal>> mPullers;
+static std::map<int32_t, std::shared_ptr<StatsPullAtomCallbackInternal>> pullers;
class StatsdProvider {
public:
- StatsdProvider() : deathRecipient(AIBinder_DeathRecipient_new(binderDied)) {
+ StatsdProvider() : mDeathRecipient(AIBinder_DeathRecipient_new(binderDied)) {
}
~StatsdProvider() {
@@ -180,21 +180,26 @@
}
std::shared_ptr<IStatsd> getStatsService() {
- std::lock_guard<std::mutex> lock(statsdMutex);
- if (!statsd) {
+        // There are host unit tests that use libstatspull. Since statsd is not available
+        // on host, getStatsService() is a no-op there and should return nullptr.
+#ifdef __ANDROID__
+ std::lock_guard<std::mutex> lock(mStatsdMutex);
+ if (!mStatsd) {
// Fetch statsd
::ndk::SpAIBinder binder(AServiceManager_getService("stats"));
- statsd = IStatsd::fromBinder(binder);
- if (statsd) {
- AIBinder_linkToDeath(binder.get(), deathRecipient.get(), this);
+ mStatsd = IStatsd::fromBinder(binder);
+ if (mStatsd) {
+ AIBinder_linkToDeath(binder.get(), mDeathRecipient.get(), this);
}
}
- return statsd;
+#endif // __ANDROID__
+ return mStatsd;
}
void resetStatsService() {
- std::lock_guard<std::mutex> lock(statsdMutex);
- statsd = nullptr;
+ std::lock_guard<std::mutex> lock(mStatsdMutex);
+ mStatsd = nullptr;
}
static void binderDied(void* cookie) {
@@ -210,8 +215,8 @@
// copy of the data with the lock held before iterating through the map.
std::map<int32_t, std::shared_ptr<StatsPullAtomCallbackInternal>> pullersCopy;
{
- std::lock_guard<std::mutex> lock(pullAtomMutex);
- pullersCopy = mPullers;
+ std::lock_guard<std::mutex> lock(pullersMutex);
+ pullersCopy = pullers;
}
for (const auto& it : pullersCopy) {
statsService->registerNativePullAtomCallback(it.first, it.second->getCoolDownMillis(),
@@ -222,16 +227,16 @@
private:
/**
- * @brief statsdMutex is used to guard simultaneous access to statsd from below threads:
+     * @brief mStatsdMutex is used to guard simultaneous access to mStatsd from the threads below:
* Work thread
* - registerStatsPullAtomCallbackBlocking()
* - unregisterStatsPullAtomCallbackBlocking()
* Binder thread:
* - StatsdProvider::binderDied()
*/
- std::mutex statsdMutex;
- std::shared_ptr<IStatsd> statsd;
- ::ndk::ScopedAIBinder_DeathRecipient deathRecipient;
+ std::mutex mStatsdMutex;
+ std::shared_ptr<IStatsd> mStatsd;
+ ::ndk::ScopedAIBinder_DeathRecipient mDeathRecipient;
};
static std::shared_ptr<StatsdProvider> statsProvider = std::make_shared<StatsdProvider>();
@@ -260,6 +265,114 @@
statsService->unregisterNativePullAtomCallback(atomTag);
}
+class CallbackOperationsHandler {
+ struct Cmd {
+ enum Type { CMD_REGISTER, CMD_UNREGISTER };
+
+ Type type;
+ int atomTag;
+ std::shared_ptr<StatsPullAtomCallbackInternal> callback;
+ };
+
+public:
+ ~CallbackOperationsHandler() {
+ for (auto& workThread : mWorkThreads) {
+ if (workThread.joinable()) {
+ mCondition.notify_one();
+ workThread.join();
+ }
+ }
+ }
+
+ static CallbackOperationsHandler& getInstance() {
+ static CallbackOperationsHandler handler;
+ return handler;
+ }
+
+ void registerCallback(int atomTag, std::shared_ptr<StatsPullAtomCallbackInternal> callback) {
+ auto registerCmd = std::make_unique<Cmd>();
+ registerCmd->type = Cmd::CMD_REGISTER;
+ registerCmd->atomTag = atomTag;
+ registerCmd->callback = std::move(callback);
+ pushToQueue(std::move(registerCmd));
+
+ std::thread registerThread(&CallbackOperationsHandler::processCommands, this,
+ statsProvider);
+ mWorkThreads.push_back(std::move(registerThread));
+ }
+
+ void unregisterCallback(int atomTag) {
+ auto unregisterCmd = std::make_unique<Cmd>();
+ unregisterCmd->type = Cmd::CMD_UNREGISTER;
+ unregisterCmd->atomTag = atomTag;
+ pushToQueue(std::move(unregisterCmd));
+
+ std::thread unregisterThread(&CallbackOperationsHandler::processCommands, this,
+ statsProvider);
+ mWorkThreads.push_back(std::move(unregisterThread));
+ }
+
+private:
+ std::vector<std::thread> mWorkThreads;
+
+ std::condition_variable mCondition;
+ std::mutex mMutex;
+ std::queue<std::unique_ptr<Cmd>> mCmdQueue;
+
+ CallbackOperationsHandler() {
+ }
+
+ void pushToQueue(std::unique_ptr<Cmd> cmd) {
+ {
+ std::unique_lock<std::mutex> lock(mMutex);
+ mCmdQueue.push(std::move(cmd));
+ }
+ mCondition.notify_one();
+ }
+
+ void processCommands(std::shared_ptr<StatsdProvider> statsProvider) {
+ /**
+         * First, obtain the stats service instance.
+         * This is a blocking call that waits until the service is ready.
+ */
+ const std::shared_ptr<IStatsd> statsService = statsProvider->getStatsService();
+
+ /**
+         * To guarantee sequential command processing we need to hold the queue mutex.
+ */
+ std::unique_lock<std::mutex> lock(mMutex);
+ /**
+ * This should never really block in practice, since the command was already queued
+ * from the main thread by registerCallback or unregisterCallback.
+         * A command is pushed to the queue before its worker thread is created, and each
+         * worker thread pops exactly one command and terminates after processing it, so
+         * producers and consumers are matched 1:1.
+ */
+ if (mCmdQueue.empty()) {
+ mCondition.wait(lock, [this] { return !this->mCmdQueue.empty(); });
+ }
+
+ std::unique_ptr<Cmd> cmd = std::move(mCmdQueue.front());
+ mCmdQueue.pop();
+
+ if (!statsService) {
+ // Statsd not available - dropping command request
+ return;
+ }
+
+ switch (cmd->type) {
+ case Cmd::CMD_REGISTER: {
+ registerStatsPullAtomCallbackBlocking(cmd->atomTag, statsProvider, cmd->callback);
+ break;
+ }
+ case Cmd::CMD_UNREGISTER: {
+ unregisterStatsPullAtomCallbackBlocking(cmd->atomTag, statsProvider);
+ break;
+ }
+ }
+ }
+};
+
void AStatsManager_setPullAtomCallback(int32_t atom_tag, AStatsManager_PullAtomMetadata* metadata,
AStatsManager_PullAtomCallback callback, void* cookie) {
int64_t coolDownMillis =
@@ -276,24 +389,21 @@
timeoutMillis, additiveFields);
{
- std::lock_guard<std::mutex> lg(pullAtomMutex);
+ std::lock_guard<std::mutex> lock(pullersMutex);
// Always add to the map. If statsd is dead, we will add them when it comes back.
- mPullers[atom_tag] = callbackBinder;
+ pullers[atom_tag] = callbackBinder;
}
- std::thread registerThread(registerStatsPullAtomCallbackBlocking, atom_tag, statsProvider,
- callbackBinder);
- registerThread.detach();
+ CallbackOperationsHandler::getInstance().registerCallback(atom_tag, callbackBinder);
}
void AStatsManager_clearPullAtomCallback(int32_t atom_tag) {
{
- std::lock_guard<std::mutex> lg(pullAtomMutex);
+ std::lock_guard<std::mutex> lock(pullersMutex);
// Always remove the puller from our map.
// If statsd is down, we will not register it when it comes back.
- mPullers.erase(atom_tag);
+ pullers.erase(atom_tag);
}
- std::thread unregisterThread(unregisterStatsPullAtomCallbackBlocking, atom_tag, statsProvider);
- unregisterThread.detach();
+ CallbackOperationsHandler::getInstance().unregisterCallback(atom_tag);
}
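For orientation, a minimal sketch of the client flow these register/unregister paths serve (editorial illustration, not part of this change); the atom tag and pulled value are hypothetical, and the callback and event-list calls are assumed from stats_pull_atom_callback.h and stats_event.h:

#include <stats_event.h>
#include <stats_pull_atom_callback.h>

// Hypothetical pulled-atom tag used only for illustration.
constexpr int32_t kExamplePulledAtomTag = 10064;

static AStatsManager_PullAtomCallbackReturn onPull(int32_t atomTag, AStatsEventList* data,
                                                   void* /*cookie*/) {
    AStatsEvent* event = AStatsEventList_addStatsEvent(data);
    AStatsEvent_setAtomId(event, atomTag);
    AStatsEvent_writeInt64(event, /*hypothetical value=*/123);
    AStatsEvent_build(event);
    return AStatsManager_PULL_SUCCESS;
}

static void registerExamplePuller() {
    // A null metadata pointer uses the defaults above: 1000 ms cool-down, 1500 ms timeout.
    AStatsManager_setPullAtomCallback(kExamplePulledAtomTag, /*metadata=*/nullptr, onPull,
                                      /*cookie=*/nullptr);
}

static void unregisterExamplePuller() {
    AStatsManager_clearPullAtomCallback(kExamplePulledAtomTag);
}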
diff --git a/lib/libstatspull/stats_subscription.cpp b/lib/libstatspull/stats_subscription.cpp
new file mode 100644
index 0000000..3bc7e79
--- /dev/null
+++ b/lib/libstatspull/stats_subscription.cpp
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2023, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <aidl/android/os/BnStatsSubscriptionCallback.h>
+#include <aidl/android/os/IStatsd.h>
+#include <aidl/android/os/StatsSubscriptionCallbackReason.h>
+#include <android/binder_auto_utils.h>
+#include <stats_provider.h>
+#include <stats_subscription.h>
+
+#include <atomic>
+#include <map>
+#include <vector>
+
+using Status = ::ndk::ScopedAStatus;
+using aidl::android::os::BnStatsSubscriptionCallback;
+using aidl::android::os::IStatsd;
+using aidl::android::os::StatsSubscriptionCallbackReason;
+using ::ndk::SharedRefBase;
+
+class Subscription;
+
+// Mutex for accessing subscriptions map.
+static std::mutex subscriptionsMutex;
+
+// TODO(b/271039569): Store subscriptions in a singleton object.
+// Added subscriptions keyed by their subscription ID.
+static std::map</* subscription ID */ int32_t, std::shared_ptr<Subscription>> subscriptions;
+
+class Subscription : public BnStatsSubscriptionCallback {
+public:
+ Subscription(const int32_t subscriptionId, const std::vector<uint8_t>& subscriptionConfig,
+ const AStatsManager_SubscriptionCallback callback, void* cookie)
+ : mSubscriptionId(subscriptionId),
+ mSubscriptionParamsBytes(subscriptionConfig),
+ mCallback(callback),
+ mCookie(cookie) {
+ }
+
+ Status onSubscriptionData(const StatsSubscriptionCallbackReason reason,
+ const std::vector<uint8_t>& subscriptionPayload) override {
+ std::vector<uint8_t> mutablePayload = subscriptionPayload;
+ mCallback(mSubscriptionId, static_cast<AStatsManager_SubscriptionCallbackReason>(reason),
+ mutablePayload.data(), mutablePayload.size(), mCookie);
+
+ std::shared_ptr<Subscription> thisSubscription;
+ if (reason == StatsSubscriptionCallbackReason::SUBSCRIPTION_ENDED) {
+ std::lock_guard<std::mutex> lock(subscriptionsMutex);
+
+ auto subscriptionsIt = subscriptions.find(mSubscriptionId);
+ if (subscriptionsIt != subscriptions.end()) {
+ // Ensure this subscription's refcount doesn't hit 0 when we erase it from the
+ // subscriptions map by adding a local reference here.
+ thisSubscription = subscriptionsIt->second;
+
+ subscriptions.erase(subscriptionsIt);
+ }
+ }
+
+ return Status::ok();
+ }
+
+ const std::vector<uint8_t>& getSubscriptionParamsBytes() const {
+ return mSubscriptionParamsBytes;
+ }
+
+private:
+ const int32_t mSubscriptionId;
+ const std::vector<uint8_t> mSubscriptionParamsBytes;
+ const AStatsManager_SubscriptionCallback mCallback;
+ void* mCookie;
+};
+
+// Forward declaration so it can be referenced in the StatsProvider constructor.
+static void onStatsBinderRestart();
+
+static std::shared_ptr<StatsProvider> statsProvider =
+ std::make_shared<StatsProvider>(onStatsBinderRestart);
+
+static void onStatsBinderRestart() {
+ const std::shared_ptr<IStatsd> statsService = statsProvider->getStatsService();
+ if (statsService == nullptr) {
+ return;
+ }
+
+ // Since we do not want to make an IPC with the lock held, we first create a
+ // copy of the data with the lock held before iterating through the map.
+ std::map<int32_t, std::shared_ptr<Subscription>> subscriptionsCopy;
+ {
+ std::lock_guard<std::mutex> lock(subscriptionsMutex);
+ subscriptionsCopy = subscriptions;
+ }
+ for (const auto& [_, subscription] : subscriptionsCopy) {
+ statsService->addSubscription(subscription->getSubscriptionParamsBytes(), subscription);
+ }
+}
+
+static int32_t getNextSubscriptionId() {
+ static std::atomic_int32_t nextSubscriptionId(0);
+ return ++nextSubscriptionId;
+}
+
+static std::shared_ptr<Subscription> getBinderCallbackForSubscription(
+ const int32_t subscription_id) {
+ std::lock_guard<std::mutex> lock(subscriptionsMutex);
+ auto subscriptionsIt = subscriptions.find(subscription_id);
+ if (subscriptionsIt == subscriptions.end()) {
+ return nullptr;
+ }
+ return subscriptionsIt->second;
+}
+
+int32_t AStatsManager_addSubscription(const uint8_t* subscription_config, const size_t num_bytes,
+ const AStatsManager_SubscriptionCallback callback,
+ void* cookie) {
+ const std::vector<uint8_t> subscriptionConfig(subscription_config,
+ subscription_config + num_bytes);
+ const int32_t subscriptionId(getNextSubscriptionId());
+ std::shared_ptr<Subscription> subscription =
+ SharedRefBase::make<Subscription>(subscriptionId, subscriptionConfig, callback, cookie);
+
+ {
+ std::lock_guard<std::mutex> lock(subscriptionsMutex);
+
+ subscriptions[subscriptionId] = subscription;
+ }
+
+ // TODO(b/270648168): Queue the binder call to not block on binder
+ const std::shared_ptr<IStatsd> statsService = statsProvider->getStatsService();
+ if (statsService != nullptr) {
+ statsService->addSubscription(subscriptionConfig, subscription);
+ }
+
+ return subscriptionId;
+}
+
+void AStatsManager_removeSubscription(const int32_t subscription_id) {
+ std::shared_ptr<Subscription> subscription = getBinderCallbackForSubscription(subscription_id);
+ if (subscription == nullptr) {
+ return;
+ }
+
+ // TODO(b/270648168): Queue the binder call to not block on binder
+ const std::shared_ptr<IStatsd> statsService = statsProvider->getStatsService();
+ if (statsService == nullptr) {
+ // Statsd not available.
+ // TODO(b/270656443): keep track of removeSubscription request and make the IPC call when
+ // statsd binder comes back up.
+ return;
+ }
+ statsService->removeSubscription(subscription);
+}
+
+void AStatsManager_flushSubscription(const int32_t subscription_id) {
+ std::shared_ptr<Subscription> subscription = getBinderCallbackForSubscription(subscription_id);
+ if (subscription == nullptr) {
+ return;
+ }
+
+ // TODO(b/270648168): Queue the binder call to not block on binder
+ const std::shared_ptr<IStatsd> statsService = statsProvider->getStatsService();
+ if (statsService == nullptr) {
+ // Statsd not available.
+ // TODO(b/270656443): keep track of flushSubscription request and make the IPC call when
+ // statsd binder comes back up.
+ return;
+ }
+
+ // TODO(b/273649282): Ensure the subscription is cleared in case the final Binder data
+ // callback fails.
+ statsService->flushSubscription(subscription);
+}
diff --git a/lib/libstatspull/tests/pull_atom_metadata_test.cpp b/lib/libstatspull/tests/pull_atom_metadata_test.cpp
index abc8e47..c3ebe5b 100644
--- a/lib/libstatspull/tests/pull_atom_metadata_test.cpp
+++ b/lib/libstatspull/tests/pull_atom_metadata_test.cpp
@@ -21,7 +21,7 @@
namespace {
static const int64_t DEFAULT_COOL_DOWN_MILLIS = 1000LL; // 1 second.
-static const int64_t DEFAULT_TIMEOUT_MILLIS = 2000LL; // 2 seconds.
+static const int64_t DEFAULT_TIMEOUT_MILLIS = 1500LL; // 1.5 seconds.
} // anonymous namespace
diff --git a/lib/libstatspull/tests/stats_subscription_test.cpp b/lib/libstatspull/tests/stats_subscription_test.cpp
new file mode 100644
index 0000000..301a937
--- /dev/null
+++ b/lib/libstatspull/tests/stats_subscription_test.cpp
@@ -0,0 +1,314 @@
+/*
+ * Copyright (C) 2023, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <binder/ProcessState.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <gtest_matchers.h>
+#include <stats_subscription.h>
+#include <stdint.h>
+#include <utils/Looper.h>
+
+#include <chrono>
+#include <string>
+#include <thread>
+#include <vector>
+
+#include "packages/modules/StatsD/statsd/src/shell/shell_config.pb.h"
+#include "packages/modules/StatsD/statsd/src/shell/shell_data.pb.h"
+#include "statslog_statsdtest.h"
+
+#ifdef __ANDROID__
+
+using namespace testing;
+using android::Looper;
+using android::ProcessState;
+using android::sp;
+using android::os::statsd::Atom;
+using android::os::statsd::ShellData;
+using android::os::statsd::ShellSubscription;
+using android::os::statsd::TestAtomReported_State_OFF;
+using android::os::statsd::TrainExperimentIds;
+using android::os::statsd::util::BytesField;
+using android::os::statsd::util::SCREEN_BRIGHTNESS_CHANGED;
+using android::os::statsd::util::stats_write;
+using android::os::statsd::util::TEST_ATOM_REPORTED;
+using android::os::statsd::util::TEST_ATOM_REPORTED__REPEATED_ENUM_FIELD__OFF;
+using std::string;
+using std::vector;
+using std::this_thread::sleep_for;
+
+namespace {
+
+class SubscriptionTest : public Test {
+public:
+ SubscriptionTest() : looper(Looper::prepare(/*opts=*/0)) {
+ const TestInfo* const test_info = UnitTest::GetInstance()->current_test_info();
+ ALOGD("**** Setting up for %s.%s\n", test_info->test_case_name(), test_info->name());
+
+ *trainExpIds.mutable_experiment_id() = {expIds.begin(), expIds.end()};
+ trainExpIds.SerializeToString(&trainExpIdsBytes);
+ }
+
+ ~SubscriptionTest() {
+ const TestInfo* const test_info = UnitTest::GetInstance()->current_test_info();
+ ALOGD("**** Tearing down after %s.%s\n", test_info->test_case_name(), test_info->name());
+ }
+
+protected:
+ void SetUp() override {
+ // Start the Binder thread pool.
+ ProcessState::self()->startThreadPool();
+ }
+
+ void TearDown() {
+ // Clear any dangling subscriptions from statsd.
+ if (__builtin_available(android __STATSD_SUBS_MIN_API__, *)) {
+ AStatsManager_removeSubscription(subId);
+ }
+ }
+
+ void LogTestAtomReported(int32_t intFieldValue) {
+ const BytesField bytesField(trainExpIdsBytes.data(), trainExpIdsBytes.size());
+ stats_write(TEST_ATOM_REPORTED, uids.data(), uids.size(), tags, intFieldValue,
+ /*long_field=*/2LL, /*float_field=*/3.0F,
+ /*string_field=*/string1.c_str(),
+ /*boolean_field=*/false, /*state=*/TEST_ATOM_REPORTED__REPEATED_ENUM_FIELD__OFF,
+ bytesField, repeatedInts, repeatedLongs, repeatedFloats, repeatedStrings,
+ &(repeatedBool[0]), /*repeatedBoolSize=*/2, repeatedEnums);
+ }
+
+ int32_t subId;
+
+ // TestAtomReported fields.
+ const vector<int32_t> uids = {1};
+ const string tag = "test";
+ const vector<char const*> tags = {tag.c_str()};
+
+ // 100 int64s for the MODE_BYTES field to push atom size to over 1K.
+ const vector<int64_t> expIds = vector<int64_t>(100, INT64_MAX);
+
+ const vector<int32_t> repeatedInts{1};
+ const vector<int64_t> repeatedLongs{2LL};
+ const vector<float> repeatedFloats{3.0F};
+ const string string1 = "ABC";
+ const vector<char const*> repeatedStrings = {string1.c_str()};
+ const bool repeatedBool[2] = {false, true};
+ const vector<int32_t> repeatedEnums = {TEST_ATOM_REPORTED__REPEATED_ENUM_FIELD__OFF};
+ TrainExperimentIds trainExpIds;
+ string trainExpIdsBytes;
+
+private:
+ sp<Looper> looper;
+};
+
+// Stores arguments passed in subscription callback.
+struct CallbackData {
+ int32_t subId;
+ AStatsManager_SubscriptionCallbackReason reason;
+ vector<uint8_t> payload;
+ int count; // Stores number of times the callback is invoked.
+};
+
+static void callback(int32_t subscription_id, AStatsManager_SubscriptionCallbackReason reason,
+ uint8_t* _Nonnull payload, size_t num_bytes, void* _Nullable cookie) {
+ CallbackData* data = static_cast<CallbackData*>(cookie);
+ data->subId = subscription_id;
+ data->reason = reason;
+ data->payload.assign(payload, payload + num_bytes);
+ data->count++;
+}
+
+constexpr static int WAIT_MS = 500;
+
+TEST_F(SubscriptionTest, TestSubscription) {
+ if (__builtin_available(android __STATSD_SUBS_MIN_API__, *)) {
+ ShellSubscription config;
+ config.add_pushed()->set_atom_id(TEST_ATOM_REPORTED);
+ config.add_pushed()->set_atom_id(SCREEN_BRIGHTNESS_CHANGED);
+
+ string configBytes;
+ config.SerializeToString(&configBytes);
+
+ CallbackData callbackData{/*subId=*/0,
+ ASTATSMANAGER_SUBSCRIPTION_CALLBACK_REASON_SUBSCRIPTION_ENDED,
+ /*payload=*/{},
+ /*count=*/0};
+
+ // Add subscription.
+ subId = AStatsManager_addSubscription(reinterpret_cast<const uint8_t*>(configBytes.data()),
+ configBytes.size(), &callback, &callbackData);
+ ASSERT_GT(subId, 0);
+ sleep_for(std::chrono::milliseconds(WAIT_MS));
+
+ // Log events without exceeding statsd cache.
+ stats_write(SCREEN_BRIGHTNESS_CHANGED, 100);
+ LogTestAtomReported(1);
+ sleep_for(std::chrono::milliseconds(WAIT_MS));
+
+ // Verify no callback occurred yet.
+ EXPECT_EQ(callbackData.subId, 0);
+ EXPECT_EQ(callbackData.reason,
+ ASTATSMANAGER_SUBSCRIPTION_CALLBACK_REASON_SUBSCRIPTION_ENDED);
+ EXPECT_EQ(callbackData.count, 0);
+ ASSERT_TRUE(callbackData.payload.empty());
+
+ // Log another TestAtomReported to overflow cache.
+ LogTestAtomReported(2);
+ sleep_for(std::chrono::milliseconds(WAIT_MS));
+
+ // Verify callback occurred.
+ EXPECT_EQ(callbackData.subId, subId);
+ EXPECT_EQ(callbackData.reason, ASTATSMANAGER_SUBSCRIPTION_CALLBACK_REASON_STATSD_INITIATED);
+ EXPECT_EQ(callbackData.count, 1);
+ ASSERT_GT(callbackData.payload.size(), 0);
+
+ ShellData actualShellData;
+ ASSERT_TRUE(actualShellData.ParseFromArray(callbackData.payload.data(),
+ callbackData.payload.size()));
+
+ ASSERT_GE(actualShellData.elapsed_timestamp_nanos_size(), 3);
+ EXPECT_THAT(actualShellData.elapsed_timestamp_nanos(), Each(Gt(0LL)));
+
+ ASSERT_GE(actualShellData.atom_size(), 3);
+
+ // Verify atom 1.
+ Atom expectedAtom;
+ expectedAtom.mutable_screen_brightness_changed()->set_level(100);
+ EXPECT_THAT(actualShellData.atom(0), EqAtom(expectedAtom));
+
+ // Verify atom 2.
+ expectedAtom.Clear();
+ auto* testAtomReported = expectedAtom.mutable_test_atom_reported();
+ auto* attributionNode = testAtomReported->add_attribution_node();
+ attributionNode->set_uid(uids[0]);
+ attributionNode->set_tag(tag);
+ testAtomReported->set_int_field(1);
+ testAtomReported->set_long_field(2LL);
+ testAtomReported->set_float_field(3.0F);
+ testAtomReported->set_string_field(string1);
+ testAtomReported->set_boolean_field(false);
+ testAtomReported->set_state(TestAtomReported_State_OFF);
+ *testAtomReported->mutable_bytes_field() = trainExpIds;
+ *testAtomReported->mutable_repeated_int_field() = {repeatedInts.begin(),
+ repeatedInts.end()};
+ *testAtomReported->mutable_repeated_long_field() = {repeatedLongs.begin(),
+ repeatedLongs.end()};
+ *testAtomReported->mutable_repeated_float_field() = {repeatedFloats.begin(),
+ repeatedFloats.end()};
+ *testAtomReported->mutable_repeated_string_field() = {repeatedStrings.begin(),
+ repeatedStrings.end()};
+ *testAtomReported->mutable_repeated_boolean_field() = {&repeatedBool[0],
+ &repeatedBool[0] + 2};
+ *testAtomReported->mutable_repeated_enum_field() = {repeatedEnums.begin(),
+ repeatedEnums.end()};
+ EXPECT_THAT(actualShellData.atom(1), EqAtom(expectedAtom));
+
+ // Verify atom 3.
+ testAtomReported->set_int_field(2);
+ EXPECT_THAT(actualShellData.atom(2), EqAtom(expectedAtom));
+
+ // Log another ScreenBrightnessChanged atom. No callback should occur.
+ stats_write(SCREEN_BRIGHTNESS_CHANGED, 99);
+ sleep_for(std::chrono::milliseconds(WAIT_MS));
+ EXPECT_EQ(callbackData.count, 1);
+
+ // Flush subscription. Callback should occur.
+ AStatsManager_flushSubscription(subId);
+ sleep_for(std::chrono::milliseconds(WAIT_MS));
+
+ EXPECT_EQ(callbackData.subId, subId);
+ EXPECT_EQ(callbackData.reason, ASTATSMANAGER_SUBSCRIPTION_CALLBACK_REASON_FLUSH_REQUESTED);
+ EXPECT_EQ(callbackData.count, 2);
+ ASSERT_GT(callbackData.payload.size(), 0);
+
+ ASSERT_TRUE(actualShellData.ParseFromArray(callbackData.payload.data(),
+ callbackData.payload.size()));
+
+ ASSERT_GE(actualShellData.elapsed_timestamp_nanos_size(), 1);
+ EXPECT_THAT(actualShellData.elapsed_timestamp_nanos(), Each(Gt(0LL)));
+
+ ASSERT_GE(actualShellData.atom_size(), 1);
+
+ // Verify atom 1.
+ expectedAtom.Clear();
+ expectedAtom.mutable_screen_brightness_changed()->set_level(99);
+ EXPECT_THAT(actualShellData.atom(0), EqAtom(expectedAtom));
+
+ // Log another ScreenBrightnessChanged atom. No callback should occur.
+ stats_write(SCREEN_BRIGHTNESS_CHANGED, 98);
+ sleep_for(std::chrono::milliseconds(WAIT_MS));
+ EXPECT_EQ(callbackData.count, 2);
+
+ // Trigger callback through cache timeout.
+ // Two 500 ms sleeps have occurred already so the total sleep is 71000 ms since last
+ // callback invocation.
+ sleep_for(std::chrono::milliseconds(70'000));
+ EXPECT_EQ(callbackData.subId, subId);
+ EXPECT_EQ(callbackData.reason, ASTATSMANAGER_SUBSCRIPTION_CALLBACK_REASON_STATSD_INITIATED);
+ EXPECT_EQ(callbackData.count, 3);
+ ASSERT_GT(callbackData.payload.size(), 0);
+
+ ASSERT_TRUE(actualShellData.ParseFromArray(callbackData.payload.data(),
+ callbackData.payload.size()));
+
+ ASSERT_GE(actualShellData.elapsed_timestamp_nanos_size(), 1);
+ EXPECT_THAT(actualShellData.elapsed_timestamp_nanos(), Each(Gt(0LL)));
+
+ ASSERT_GE(actualShellData.atom_size(), 1);
+
+ // Verify atom 1.
+ expectedAtom.Clear();
+ expectedAtom.mutable_screen_brightness_changed()->set_level(98);
+ EXPECT_THAT(actualShellData.atom(0), EqAtom(expectedAtom));
+
+ // Log another ScreenBrightnessChanged atom. No callback should occur.
+ stats_write(SCREEN_BRIGHTNESS_CHANGED, 97);
+ sleep_for(std::chrono::milliseconds(WAIT_MS));
+ EXPECT_EQ(callbackData.count, 3);
+
+ // End subscription. Final callback should occur.
+ AStatsManager_removeSubscription(subId);
+ sleep_for(std::chrono::milliseconds(WAIT_MS));
+
+ EXPECT_EQ(callbackData.subId, subId);
+ EXPECT_EQ(callbackData.reason,
+ ASTATSMANAGER_SUBSCRIPTION_CALLBACK_REASON_SUBSCRIPTION_ENDED);
+ EXPECT_EQ(callbackData.count, 4);
+ ASSERT_GT(callbackData.payload.size(), 0);
+
+ ASSERT_TRUE(actualShellData.ParseFromArray(callbackData.payload.data(),
+ callbackData.payload.size()));
+
+ ASSERT_GE(actualShellData.elapsed_timestamp_nanos_size(), 1);
+ EXPECT_THAT(actualShellData.elapsed_timestamp_nanos(), Each(Gt(0LL)));
+
+ ASSERT_GE(actualShellData.atom_size(), 1);
+
+ // Verify atom 1.
+ expectedAtom.Clear();
+ expectedAtom.mutable_screen_brightness_changed()->set_level(97);
+ EXPECT_THAT(actualShellData.atom(0), EqAtom(expectedAtom));
+ } else {
+ GTEST_SKIP();
+ }
+}
+
+} // namespace
+
+#else
+GTEST_LOG_(INFO) << "This test does nothing.\n";
+#endif
diff --git a/lib/libstatssocket/Android.bp b/lib/libstatssocket/Android.bp
index 4369459..6a3c3a9 100644
--- a/lib/libstatssocket/Android.bp
+++ b/lib/libstatssocket/Android.bp
@@ -111,7 +111,10 @@
shared_libs: [
"libutils",
],
- test_suites: ["device-tests", "mts-statsd"],
+ test_suites: [
+ "device-tests",
+ "mts-statsd",
+ ],
test_config: "libstatssocket_test.xml",
//TODO(b/153588990): Remove when the build system properly separates.
//32bit and 64bit architectures.
@@ -125,4 +128,5 @@
},
},
require_root: true,
+ min_sdk_version: "30",
}
diff --git a/lib/libstatssocket/include/stats_annotations.h b/lib/libstatssocket/include/stats_annotations.h
index e812af0..2963db9 100644
--- a/lib/libstatssocket/include/stats_annotations.h
+++ b/lib/libstatssocket/include/stats_annotations.h
@@ -80,6 +80,116 @@
* Introduced in API 31.
*/
ASTATSLOG_ANNOTATION_ID_STATE_NESTED = 8,
+
+ /**
+ * Annotation ID constant to indicate the restriction category of an atom.
+ * This annotation must only be attached to the atom id. This is an int annotation.
+ *
+ * Introduced in API 34.
+ */
+ ASTATSLOG_ANNOTATION_ID_RESTRICTION_CATEGORY = 9,
+
+ /**
+ * Annotation ID to indicate that a field of an atom contains peripheral device info.
+ * This is a bool annotation.
+ *
+ * Introduced in API 34.
+ */
+ ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_PERIPHERAL_DEVICE_INFO = 10,
+
+ /**
+ * Annotation ID to indicate that a field of an atom contains app usage information.
+ * This is a bool annotation.
+ *
+ * Introduced in API 34.
+ */
+ ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_APP_USAGE = 11,
+
+ /**
+ * Annotation ID to indicate that a field of an atom contains app activity information.
+ * This is a bool annotation.
+ *
+ * Introduced in API 34.
+ */
+ ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_APP_ACTIVITY = 12,
+
+ /**
+ * Annotation ID to indicate that a field of an atom contains health connect information.
+ * This is a bool annotation.
+ *
+ * Introduced in API 34.
+ */
+ ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_HEALTH_CONNECT = 13,
+
+ /**
+ * Annotation ID to indicate that a field of an atom contains accessibility information.
+ * This is a bool annotation.
+ *
+ * Introduced in API 34.
+ */
+ ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_ACCESSIBILITY = 14,
+
+ /**
+ * Annotation ID to indicate that a field of an atom contains system search information.
+ * This is a bool annotation.
+ *
+ * Introduced in API 34.
+ */
+ ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_SYSTEM_SEARCH = 15,
+
+ /**
+ * Annotation ID to indicate that a field of an atom contains user engagement information.
+ * This is a bool annotation.
+ *
+ * Introduced in API 34.
+ */
+ ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_USER_ENGAGEMENT = 16,
+
+ /**
+ * Annotation ID to indicate that a field of an atom contains ambient sensing information.
+ * This is a bool annotation.
+ *
+ * Introduced in API 34.
+ */
+ ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_AMBIENT_SENSING = 17,
+
+ /**
+ * Annotation ID to indicate that a field of an atom contains demographic classification
+ * information. This is a bool annotation.
+ *
+ * Introduced in API 34.
+ */
+ ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_DEMOGRAPHIC_CLASSIFICATION = 18,
};
+enum AStatsLogRestrictionCategory : uint32_t {
+ /**
+ * Restriction category for atoms about diagnostics.
+ *
+ * Introduced in API 34.
+ */
+ ASTATSLOG_RESTRICTION_CATEGORY_DIAGNOSTIC = 1,
+
+ /**
+ * Restriction category for atoms about system intelligence.
+ *
+ * Introduced in API 34.
+ */
+ ASTATSLOG_RESTRICTION_CATEGORY_SYSTEM_INTELLIGENCE = 2,
+
+ /**
+ * Restriction category for atoms about authentication.
+ *
+ * Introduced in API 34.
+ */
+ ASTATSLOG_RESTRICTION_CATEGORY_AUTHENTICATION = 3,
+
+ /**
+ * Restriction category for atoms about fraud and abuse.
+ *
+ * Introduced in API 34.
+ */
+ ASTATSLOG_RESTRICTION_CATEGORY_FRAUD_AND_ABUSE = 4,
+
+};
__END_DECLS
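For orientation, a minimal sketch of how these annotation IDs attach to an event via the libstatssocket event API (editorial illustration, not part of this change); the atom id and field value are hypothetical, and the AStatsEvent_* calls are assumed from stats_event.h:

#include <stats_annotations.h>
#include <stats_event.h>

static void logRestrictedAtomExample() {
    AStatsEvent* event = AStatsEvent_obtain();
    AStatsEvent_setAtomId(event, /*hypothetical atom id=*/105999);
    // Atom-level annotation: the restriction category must be attached to the atom id.
    AStatsEvent_addInt32Annotation(event, ASTATSLOG_ANNOTATION_ID_RESTRICTION_CATEGORY,
                                   ASTATSLOG_RESTRICTION_CATEGORY_DIAGNOSTIC);
    AStatsEvent_writeInt32(event, /*hypothetical field value=*/42);
    // Field-level annotation: marks the field written above as app usage information.
    AStatsEvent_addBoolAnnotation(event, ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_APP_USAGE, true);
    AStatsEvent_write(event);
    AStatsEvent_release(event);
}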
diff --git a/lib/libstatssocket/statsd_writer.c b/lib/libstatssocket/statsd_writer.c
index c00c978..2c9bb25 100644
--- a/lib/libstatssocket/statsd_writer.c
+++ b/lib/libstatssocket/statsd_writer.c
@@ -76,7 +76,7 @@
static int statsdOpen();
static void statsdClose();
static int statsdWrite(struct timespec* ts, struct iovec* vec, size_t nr);
-static void statsdNoteDrop();
+static void statsdNoteDrop(int error, int tag);
static int statsdIsClosed();
struct android_log_transport_write statsdLoggerWrite = {
@@ -107,7 +107,7 @@
if (sock < 0) {
ret = -errno;
} else {
- int sndbuf = 1 * 1024 * 1024; // set max send buffer size 1MB
+ const int sndbuf = 2 * 1024 * 1024; // set max send buffer size 2MB
socklen_t bufLen = sizeof(sndbuf);
// SO_RCVBUF does not have an effect on unix domain socket, but SO_SNDBUF does.
// Proceed to connect even setsockopt fails.
diff --git a/service/Android.bp b/service/Android.bp
index bad74ce..b642a0a 100644
--- a/service/Android.bp
+++ b/service/Android.bp
@@ -25,6 +25,7 @@
java_library {
name: "service-statsd",
+ defaults: ["standalone-system-server-module-optimize-defaults"],
srcs: [ ":service-statsd-sources" ],
sdk_version: "system_server_current",
libs: [
@@ -32,11 +33,11 @@
// Use the implementation library directly.
// TODO(b/204183608): Remove when no longer necessary.
"framework-statsd.impl",
+ "framework-configinfrastructure",
],
static_libs: [
"modules-utils-build",
],
- plugins: ["java_api_finder"],
lint: {
strict_updatability_linting: true
},
diff --git a/service/java/com/android/server/stats/StatsCompanion.java b/service/java/com/android/server/stats/StatsCompanion.java
index dc477a5..d0954ea 100644
--- a/service/java/com/android/server/stats/StatsCompanion.java
+++ b/service/java/com/android/server/stats/StatsCompanion.java
@@ -112,6 +112,7 @@
private static final int CODE_DATA_BROADCAST = 1;
private static final int CODE_ACTIVE_CONFIGS_BROADCAST = 1;
private static final int CODE_SUBSCRIBER_BROADCAST = 1;
+ private static final int CODE_RESTRICTED_METRICS_BROADCAST = 1;
private final PendingIntent mPendingIntent;
private final Context mContext;
@@ -184,5 +185,24 @@
+ "; presumably it had been cancelled.");
}
}
+
+ @Override
+ public void sendRestrictedMetricsChangedBroadcast(long[] metricIds) {
+ enforceStatsdCallingUid();
+ Intent intent = new Intent();
+ intent.putExtra(StatsManager.EXTRA_STATS_RESTRICTED_METRIC_IDS, metricIds);
+ try {
+ mPendingIntent.send(mContext, CODE_RESTRICTED_METRICS_BROADCAST, intent, null,
+ null);
+ if (DEBUG) {
+ Log.d(TAG,
+ "Sent restricted metrics broadcast with metric ids " + Arrays.toString(
+ metricIds));
+ }
+ } catch (PendingIntent.CanceledException e) {
+ Log.w(TAG,
+ "Unable to send restricted metrics changed broadcast using PendingIntent");
+ }
+ }
}
}
diff --git a/service/java/com/android/server/stats/StatsCompanionService.java b/service/java/com/android/server/stats/StatsCompanionService.java
index bc7e25e..367bfdd 100644
--- a/service/java/com/android/server/stats/StatsCompanionService.java
+++ b/service/java/com/android/server/stats/StatsCompanionService.java
@@ -103,8 +103,6 @@
public static final int DEATH_THRESHOLD = 10;
- private static final String INCLUDE_CERTIFICATE_HASH = "include_certificate_hash";
-
private final Context mContext;
private final AlarmManager mAlarmManager;
@GuardedBy("sStatsdLock")
@@ -160,7 +158,7 @@
String installerPackageName = null;
if (installSourceInfo != null) {
installerPackageName = installSourceInfo.getInitiatingPackageName();
- if (installerPackageName == null) {
+ if (installerPackageName == null || installerPackageName.equals("com.android.shell")) {
installerPackageName = installSourceInfo.getInstallingPackageName();
}
}
@@ -207,6 +205,7 @@
backgroundThread.start();
Handler handler = new Handler(backgroundThread.getLooper());
handler.post(() -> {
+ if (DEBUG) Log.d(TAG, "Start thread for sending uid map data.");
UserManager um = (UserManager) context.getSystemService(Context.USER_SERVICE);
PackageManager pm = context.getPackageManager();
final List<UserHandle> users = um.getUserHandles(true);
@@ -227,8 +226,7 @@
} catch (IOException e) {
Log.e(TAG, "Failed to close the read side of the pipe.", e);
}
- final ParcelFileDescriptor writeFd = fds[1];
- FileOutputStream fout = new ParcelFileDescriptor.AutoCloseOutputStream(writeFd);
+ FileOutputStream fout = new ParcelFileDescriptor.AutoCloseOutputStream(fds[1]);
try {
ProtoOutputStream output = new ProtoOutputStream(fout);
int numRecords = 0;
@@ -263,15 +261,12 @@
| ProtoOutputStream.FIELD_COUNT_SINGLE
| INSTALLER_FIELD_ID,
installer);
- if (DeviceConfig.getBoolean(
- NAMESPACE_STATSD_JAVA, INCLUDE_CERTIFICATE_HASH, false)) {
- final byte[] certHash = getPackageCertificateHash(
- packagesPlusApex.get(j).signingInfo);
- output.write(ProtoOutputStream.FIELD_TYPE_BYTES
- | ProtoOutputStream.FIELD_COUNT_SINGLE
- | CERTIFICATE_HASH_FIELD_ID,
- certHash);
- }
+ final byte[] certHash =
+ getPackageCertificateHash(packagesPlusApex.get(j).signingInfo);
+ output.write(ProtoOutputStream.FIELD_TYPE_BYTES
+ | ProtoOutputStream.FIELD_COUNT_SINGLE
+ | CERTIFICATE_HASH_FIELD_ID,
+ certHash);
numRecords++;
output.end(applicationInfoToken);
@@ -283,9 +278,9 @@
Log.d(TAG, "Sent data for " + numRecords + " apps");
}
} finally {
+ if (DEBUG) Log.d(TAG, "End thread for sending uid map data.");
FileUtils.closeQuietly(fout);
backgroundThread.quit();
- backgroundThread.interrupt();
}
});
}
@@ -297,7 +292,8 @@
List<PackageInfo> allPackages = new ArrayList<>(
pm.getInstalledPackagesAsUser(PackageManager.GET_SIGNING_CERTIFICATES
| PackageManager.MATCH_UNINSTALLED_PACKAGES
- | PackageManager.MATCH_ANY_USER,
+ | PackageManager.MATCH_ANY_USER
+ | PackageManager.MATCH_STATIC_SHARED_AND_SDK_LIBRARIES,
userHandle.getIdentifier()));
// We make a second query to package manager for the apex modules because package manager
// returns both installed and uninstalled apexes with
@@ -376,11 +372,7 @@
final String installer = getInstallerPackageName(pm, app);
// Get Package certificate hash.
- byte[] certHash = new byte[0];
- if (DeviceConfig.getBoolean(
- NAMESPACE_STATSD_JAVA, INCLUDE_CERTIFICATE_HASH, false)) {
- certHash = getPackageCertificateHash(pi.signingInfo);
- }
+ byte[] certHash = getPackageCertificateHash(pi.signingInfo);
sStatsd.informOnePackage(
app,
@@ -673,13 +665,6 @@
private void onPropertiesChanged(final Properties properties) {
updateProperties(properties);
-
- // Re-fetch package information with package certificates if include_certificate_hash
- // property changed.
- final Set<String> propertyNames = properties.getKeyset();
- if (propertyNames.contains(INCLUDE_CERTIFICATE_HASH)) {
- informAllUids(mContext);
- }
}
private void updateProperties(final Properties properties) {
@@ -710,7 +695,7 @@
try {
statsd.updateProperties(propertyParcels);
} catch (RemoteException e) {
- Log.w(TAG, "Failed to inform statsd of an include app certificate flag update", e);
+ Log.w(TAG, "Failed to inform statsd of updated statsd_java properties", e);
}
}
diff --git a/service/java/com/android/server/stats/StatsManagerService.java b/service/java/com/android/server/stats/StatsManagerService.java
index 1e3846b..c518f50 100644
--- a/service/java/com/android/server/stats/StatsManagerService.java
+++ b/service/java/com/android/server/stats/StatsManagerService.java
@@ -26,6 +26,7 @@
import android.os.Binder;
import android.os.IPullAtomCallback;
import android.os.IStatsManagerService;
+import android.os.IStatsQueryCallback;
import android.os.IStatsd;
import android.os.PowerManager;
import android.os.Process;
@@ -66,6 +67,9 @@
@GuardedBy("mLock")
private ArrayMap<ConfigKey, ArrayMap<Long, PendingIntentRef>> mBroadcastSubscriberPirMap =
new ArrayMap<>();
+ @GuardedBy("mLock")
+ private ArrayMap<ConfigKeyWithPackage, ArrayMap<Integer, PendingIntentRef>>
+ mRestrictedMetricsPirMap = new ArrayMap<>();
public StatsManagerService(Context context) {
super();
@@ -104,6 +108,39 @@
}
}
+ private static class ConfigKeyWithPackage {
+ private final String mConfigPackage;
+ private final long mConfigId;
+
+ ConfigKeyWithPackage(String configPackage, long configId) {
+ mConfigPackage = configPackage;
+ mConfigId = configId;
+ }
+
+ public String getConfigPackage() {
+ return mConfigPackage;
+ }
+
+ public long getConfigId() {
+ return mConfigId;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(mConfigPackage, mConfigId);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (obj instanceof ConfigKeyWithPackage) {
+ ConfigKeyWithPackage other = (ConfigKeyWithPackage) obj;
+ return this.mConfigPackage.equals(other.getConfigPackage())
+ && this.mConfigId == other.getConfigId();
+ }
+ return false;
+ }
+ }
+
private static class PullerKey {
private final int mUid;
private final int mAtomTag;
@@ -477,10 +514,97 @@
throw new IllegalStateException("Failed to connect to statsd to removeConfig");
}
+ @Override
+ public long[] setRestrictedMetricsChangedOperation(PendingIntent pendingIntent,
+ long configId, String configPackage) {
+ enforceRestrictedStatsPermission();
+ int callingUid = Binder.getCallingUid();
+ final long token = Binder.clearCallingIdentity();
+ PendingIntentRef pir = new PendingIntentRef(pendingIntent, mContext);
+ ConfigKeyWithPackage key = new ConfigKeyWithPackage(configPackage, configId);
+ // Add the PIR to a map so we can re-register if statsd is unavailable.
+ synchronized (mLock) {
+ ArrayMap<Integer, PendingIntentRef> innerMap = mRestrictedMetricsPirMap.getOrDefault(
+ key, new ArrayMap<>());
+ innerMap.put(callingUid, pir);
+ mRestrictedMetricsPirMap.put(key, innerMap);
+ }
+ try {
+ IStatsd statsd = getStatsdNonblocking();
+ if (statsd != null) {
+ return statsd.setRestrictedMetricsChangedOperation(configId, configPackage, pir,
+ callingUid);
+ }
+ } catch (RemoteException e) {
+ Log.e(TAG, "Failed to setRestrictedMetricsChangedOperation with statsd");
+ } finally {
+ Binder.restoreCallingIdentity(token);
+ }
+ return new long[]{};
+ }
+
+ @Override
+ public void removeRestrictedMetricsChangedOperation(long configId, String configPackage) {
+ enforceRestrictedStatsPermission();
+ int callingUid = Binder.getCallingUid();
+ final long token = Binder.clearCallingIdentity();
+ ConfigKeyWithPackage key = new ConfigKeyWithPackage(configPackage, configId);
+ synchronized (mLock) {
+ ArrayMap<Integer, PendingIntentRef> innerMap = mRestrictedMetricsPirMap.getOrDefault(
+ key, new ArrayMap<>());
+ innerMap.remove(callingUid);
+ if (innerMap.isEmpty()) {
+ mRestrictedMetricsPirMap.remove(key);
+ }
+ }
+ try {
+ IStatsd statsd = getStatsdNonblocking();
+ if (statsd != null) {
+ statsd.removeRestrictedMetricsChangedOperation(configId, configPackage, callingUid);
+ }
+ } catch (RemoteException e) {
+ Log.e(TAG, "Failed to removeRestrictedMetricsChangedOperation with statsd");
+ } finally {
+ Binder.restoreCallingIdentity(token);
+ }
+ }
+
+ @Override
+ public void querySql(String sqlQuery, int minSqlClientVersion, byte[] policyConfig,
+ IStatsQueryCallback queryCallback, long configKey, String configPackage) {
+ int callingUid = Binder.getCallingUid();
+ enforceRestrictedStatsPermission();
+ final long token = Binder.clearCallingIdentity();
+ try {
+ IStatsd statsd = waitForStatsd();
+ if (statsd != null) {
+ statsd.querySql(
+ sqlQuery,
+ minSqlClientVersion,
+ policyConfig,
+ queryCallback,
+ configKey,
+ configPackage,
+ callingUid);
+ } else {
+ queryCallback.sendFailure("Could not connect to statsd from system server");
+ }
+ } catch (RemoteException e) {
+ throw new IllegalStateException(e.getMessage(), e);
+ } finally {
+ Binder.restoreCallingIdentity(token);
+ }
+ }
+
void setStatsCompanionService(StatsCompanionService statsCompanionService) {
mStatsCompanionService = statsCompanionService;
}
+ /** Checks that the caller has READ_RESTRICTED_STATS permission. */
+ private void enforceRestrictedStatsPermission() {
+ mContext.enforceCallingPermission(Manifest.permission.READ_RESTRICTED_STATS, null);
+ }
+
/**
* Checks that the caller has both DUMP and PACKAGE_USAGE_STATS permissions. Also checks that
* the caller has USAGE_STATS_PERMISSION_OPS for the specified packageName if it is not null.
@@ -589,6 +713,8 @@
registerAllDataFetchOperations(statsd);
registerAllActiveConfigsChangedOperations(statsd);
registerAllBroadcastSubscribers(statsd);
+ registerAllRestrictedMetricsChangedOperations(statsd);
+ // TODO (b/269419485): register all restricted metric operations.
} catch (RemoteException e) {
Log.e(TAG, "StatsManager failed to (re-)register data with statsd");
} finally {
@@ -657,7 +783,7 @@
}
for (Map.Entry<ConfigKey, ArrayMap<Long, PendingIntentRef>> entry :
- mBroadcastSubscriberPirMap.entrySet()) {
+ broadcastSubscriberCopy.entrySet()) {
ConfigKey configKey = entry.getKey();
for (Map.Entry<Long, PendingIntentRef> subscriberEntry : entry.getValue().entrySet()) {
statsd.setBroadcastSubscriber(configKey.getConfigId(), subscriberEntry.getKey(),
@@ -665,4 +791,28 @@
}
}
}
+
+ // Pre-condition: the Binder calling identity has already been cleared
+ private void registerAllRestrictedMetricsChangedOperations(IStatsd statsd)
+ throws RemoteException {
+ // Since we do not want to make an IPC with the lock held, we first create a deep copy of
+ // the data with the lock held before iterating through the map.
+ ArrayMap<ConfigKeyWithPackage, ArrayMap<Integer, PendingIntentRef>> restrictedMetricsCopy =
+ new ArrayMap<>();
+ synchronized (mLock) {
+ for (Map.Entry<ConfigKeyWithPackage, ArrayMap<Integer, PendingIntentRef>> entry :
+ mRestrictedMetricsPirMap.entrySet()) {
+ restrictedMetricsCopy.put(entry.getKey(), new ArrayMap(entry.getValue()));
+ }
+ }
+
+ for (Map.Entry<ConfigKeyWithPackage, ArrayMap<Integer, PendingIntentRef>> entry :
+ restrictedMetricsCopy.entrySet()) {
+ ConfigKeyWithPackage configKey = entry.getKey();
+ for (Map.Entry<Integer, PendingIntentRef> uidEntry : entry.getValue().entrySet()) {
+ statsd.setRestrictedMetricsChangedOperation(configKey.getConfigId(),
+ configKey.getConfigPackage(), uidEntry.getValue(), uidEntry.getKey());
+ }
+ }
+ }
}
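
Editor's note: the re-registration helpers above copy the PendingIntent map while holding the lock and only then make binder calls. Below is a minimal standalone sketch of that copy-under-lock pattern, written in C++ with hypothetical names (the real code uses Java's ArrayMap and IStatsd binder calls); it is an illustration, not the module's implementation.

#include <iostream>
#include <map>
#include <mutex>
#include <string>

std::mutex gLock;
std::map<std::string, int> gSubscribers;  // guarded by gLock

// Hypothetical stand-in for a binder/IPC call; the point is that it must not
// run while gLock is held.
void notifyRemote(const std::string& key, int value) {
    std::cout << "re-register " << key << " -> " << value << "\n";
}

void reRegisterAll() {
    std::map<std::string, int> copy;
    {
        std::lock_guard<std::mutex> guard(gLock);
        copy = gSubscribers;  // deep copy taken while holding the lock
    }
    // The (potentially slow) remote calls happen outside the critical section,
    // so they cannot block other threads that need gLock.
    for (const auto& [key, value] : copy) {
        notifyRemote(key, value);
    }
}

int main() {
    {
        std::lock_guard<std::mutex> guard(gLock);
        gSubscribers = {{"config-a", 10001}, {"config-b", 10002}};
    }
    reRegisterAll();
    return 0;
}
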
diff --git a/statsd/Android.bp b/statsd/Android.bp
index f9463ba..db3f28b 100644
--- a/statsd/Android.bp
+++ b/statsd/Android.bp
@@ -21,6 +21,10 @@
cc_defaults {
name: "statsd_defaults",
+ cflags: [
+ "-Wno-deprecated-declarations",
+ ],
+
srcs: [
"src/active_config_list.proto",
"src/anomaly/AlarmMonitor.cpp",
@@ -60,6 +64,7 @@
"src/metrics/duration_helper/OringDurationTracker.cpp",
"src/metrics/DurationMetricProducer.cpp",
"src/metrics/EventMetricProducer.cpp",
+ "src/metrics/RestrictedEventMetricProducer.cpp",
"src/metrics/GaugeMetricProducer.cpp",
"src/metrics/KllMetricProducer.cpp",
"src/metrics/MetricProducer.cpp",
@@ -71,13 +76,16 @@
"src/packages/UidMap.cpp",
"src/shell/shell_config.proto",
"src/shell/ShellSubscriber.cpp",
+ "src/shell/ShellSubscriberClient.cpp",
"src/socket/StatsSocketListener.cpp",
"src/state/StateManager.cpp",
"src/state/StateTracker.cpp",
"src/stats_log_util.cpp",
+ "src/stats_policy_config.proto",
"src/statscompanion_util.cpp",
"src/statsd_config.proto",
"src/statsd_metadata.proto",
+ "src/guardrail/invalid_config_reason_enum.proto",
"src/StatsLogProcessor.cpp",
"src/StatsService.cpp",
"src/storage/StorageManager.cpp",
@@ -85,6 +93,9 @@
"src/subscriber/SubscriberReporter.cpp",
"src/uid_data.proto",
"src/utils/MultiConditionTrigger.cpp",
+ "src/utils/DbUtils.cpp",
+ "src/utils/RestrictedPolicyManager.cpp",
+ "src/utils/ShardOffsetProvider.cpp",
],
local_include_dirs: [
@@ -102,6 +113,7 @@
"libutils",
"server_configurable_flags",
"statsd-aidl-ndk",
+ "libsqlite_static_noicu",
],
shared_libs: [
"libbinder_ndk",
@@ -237,7 +249,10 @@
cc_test {
name: "statsd_test",
defaults: ["statsd_defaults"],
- test_suites: ["device-tests", "mts-statsd"],
+ test_suites: [
+ "device-tests",
+ "mts-statsd",
+ ],
test_config: "statsd_test.xml",
//TODO(b/153588990): Remove when the build system properly separates
@@ -264,6 +279,43 @@
require_root: true,
+ tidy_timeout_srcs: [
+ "tests/condition/SimpleConditionTracker_test.cpp",
+ "tests/ConfigManager_test.cpp",
+ "tests/e2e/Anomaly_count_e2e_test.cpp",
+ "tests/e2e/Anomaly_duration_sum_e2e_test.cpp",
+ "tests/e2e/ConfigUpdate_e2e_ab_test.cpp",
+ "tests/e2e/ConfigUpdate_e2e_test.cpp",
+ "tests/e2e/CountMetric_e2e_test.cpp",
+ "tests/e2e/DurationMetric_e2e_test.cpp",
+ "tests/e2e/GaugeMetric_e2e_pull_test.cpp",
+ "tests/e2e/MetricActivation_e2e_test.cpp",
+ "tests/e2e/PartialBucket_e2e_test.cpp",
+ "tests/e2e/ValueMetric_pull_e2e_test.cpp",
+ "tests/e2e/WakelockDuration_e2e_test.cpp",
+ "tests/external/puller_util_test.cpp",
+ "tests/external/StatsPuller_test.cpp",
+ "tests/FieldValue_test.cpp",
+ "tests/guardrail/StatsdStats_test.cpp",
+ "tests/LogEvent_test.cpp",
+ "tests/metrics/CountMetricProducer_test.cpp",
+ "tests/metrics/DurationMetricProducer_test.cpp",
+ "tests/metrics/EventMetricProducer_test.cpp",
+ "tests/metrics/GaugeMetricProducer_test.cpp",
+ "tests/metrics/KllMetricProducer_test.cpp",
+ "tests/metrics/MaxDurationTracker_test.cpp",
+ "tests/metrics/NumericValueMetricProducer_test.cpp",
+ "tests/metrics/OringDurationTracker_test.cpp",
+ "tests/metrics/RestrictedEventMetricProducer_test.cpp",
+ "tests/MetricsManager_test.cpp",
+ "tests/metrics/parsing_utils/config_update_utils_test.cpp",
+ "tests/metrics/parsing_utils/metrics_manager_util_test.cpp",
+ "tests/state/StateTracker_test.cpp",
+ "tests/statsd_test_util.cpp",
+ "tests/StatsLogProcessor_test.cpp",
+ "tests/UidMap_test.cpp",
+ ],
+
srcs: [
// atom_field_options.proto needs field_options.proto, but that is
// not included in libprotobuf-cpp-lite, so compile it here.
@@ -295,8 +347,10 @@
"tests/e2e/MetricActivation_e2e_test.cpp",
"tests/e2e/MetricConditionLink_e2e_test.cpp",
"tests/e2e/PartialBucket_e2e_test.cpp",
+ "tests/e2e/RestrictedConfig_e2e_test.cpp",
"tests/e2e/ValueMetric_pull_e2e_test.cpp",
"tests/e2e/WakelockDuration_e2e_test.cpp",
+ "tests/e2e/RestrictedEventMetric_e2e_test.cpp",
"tests/external/puller_util_test.cpp",
"tests/external/StatsCallbackPuller_test.cpp",
"tests/external/StatsPuller_test.cpp",
@@ -319,23 +373,28 @@
"tests/metrics/metrics_test_helper.cpp",
"tests/metrics/OringDurationTracker_test.cpp",
"tests/metrics/NumericValueMetricProducer_test.cpp",
+ "tests/metrics/RestrictedEventMetricProducer_test.cpp",
"tests/metrics/parsing_utils/config_update_utils_test.cpp",
"tests/metrics/parsing_utils/metrics_manager_util_test.cpp",
"tests/subscriber/SubscriberReporter_test.cpp",
+ "tests/LogEventFilter_test.cpp",
"tests/MetricsManager_test.cpp",
"tests/shell/ShellSubscriber_test.cpp",
"tests/state/StateTracker_test.cpp",
"tests/statsd_test_util.cpp",
+ "tests/statsd_test_util_test.cpp",
+ "tests/SocketListener_test.cpp",
"tests/StatsLogProcessor_test.cpp",
"tests/StatsService_test.cpp",
"tests/storage/StorageManager_test.cpp",
"tests/UidMap_test.cpp",
"tests/utils/MultiConditionTrigger_test.cpp",
+ "tests/utils/DbUtils_test.cpp",
],
static_libs: [
"libgmock",
- "libplatformprotos",
+ "libstatsgtestmatchers",
"libstatslog_statsdtest",
"libstatssocket_private",
],
@@ -346,7 +405,9 @@
"external/protobuf/src",
"frameworks/proto_logging/stats",
],
+ static: true,
},
+ min_sdk_version: "30",
}
@@ -364,11 +425,13 @@
":libprotobuf-internal-protos",
":libstats_internal_protos",
+ "benchmark/db_benchmark.cpp",
"benchmark/duration_metric_benchmark.cpp",
"benchmark/filter_value_benchmark.cpp",
"benchmark/get_dimensions_for_condition_benchmark.cpp",
"benchmark/hello_world_benchmark.cpp",
"benchmark/log_event_benchmark.cpp",
+ "benchmark/log_event_filter_benchmark.cpp",
"benchmark/main.cpp",
"benchmark/metric_util.cpp",
"benchmark/stats_write_benchmark.cpp",
@@ -427,6 +490,7 @@
"src/shell/shell_data.proto",
"src/stats_log.proto",
"src/statsd_config.proto",
+ "src/guardrail/invalid_config_reason_enum.proto",
],
static_libs: [
@@ -455,6 +519,7 @@
"src/shell/shell_data.proto",
"src/stats_log.proto",
"src/statsd_config.proto",
+ "src/guardrail/invalid_config_reason_enum.proto",
],
static_libs: [
"platformprotosnano",
@@ -467,10 +532,19 @@
// Filegroup for statsd config proto definition.
filegroup {
- name: "statsd-config-proto-def",
+ name: "libstats_config_protos",
srcs: ["src/statsd_config.proto"],
}
+// Filegroup for statsd report protos.
+filegroup {
+ name: "libstats_log_protos",
+ srcs: [
+ "src/stats_log.proto",
+ "src/guardrail/invalid_config_reason_enum.proto",
+ ],
+}
+
// Filegroup for all statsd protos
filegroup {
name: "statsd_internal_protos",
@@ -483,5 +557,18 @@
"src/statsd_metadata.proto",
"src/stats_log.proto",
"src/uid_data.proto",
+ "src/guardrail/invalid_config_reason_enum.proto",
],
}
+
+// Filegroup for subscription protos.
+filegroup {
+ name: "libstats_subscription_protos",
+ srcs: [
+ ":libstats_internal_protos",
+ ":libstats_config_protos",
+ "src/shell/shell_config.proto",
+ "src/shell/shell_data.proto",
+ ],
+}
+
diff --git a/statsd/benchmark/db_benchmark.cpp b/statsd/benchmark/db_benchmark.cpp
new file mode 100644
index 0000000..ebe751a
--- /dev/null
+++ b/statsd/benchmark/db_benchmark.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "benchmark/benchmark.h"
+#include "metric_util.h"
+#include "utils/DbUtils.h"
+
+using namespace std;
+
+namespace android {
+namespace os {
+namespace statsd {
+namespace dbutils {
+
+static void BM_insertAtomsIntoDbTablesNewConnection(benchmark::State& state) {
+ ConfigKey key = ConfigKey(111, 222);
+ int64_t metricId = 0;
+ int64_t bucketStartTimeNs = 10000000000;
+
+ unique_ptr<LogEvent> event =
+ CreateScreenStateChangedEvent(bucketStartTimeNs, android::view::DISPLAY_STATE_OFF);
+ vector<LogEvent> logEvents;
+ for (int j = 0; j < state.range(1); ++j) {
+ logEvents.push_back(*event.get());
+ }
+ string err;
+ for (auto s : state) {
+ for (int metricId = 0; metricId < state.range(0); ++metricId) {
+ state.PauseTiming();
+ deleteDb(key);
+ createTableIfNeeded(key, metricId, *event.get());
+ state.ResumeTiming();
+ insert(key, metricId, logEvents, err);
+ }
+ }
+ deleteDb(key);
+}
+
+BENCHMARK(BM_insertAtomsIntoDbTablesNewConnection)
+ ->Args({1, 10})
+ ->Args({1, 50})
+ ->Args({1, 100})
+ ->Args({1, 500})
+ ->Args({10, 10})
+ ->Args({10, 20});
+
+static void BM_insertAtomsIntoDbTablesReuseConnection(benchmark::State& state) {
+ ConfigKey key = ConfigKey(111, 222);
+ int64_t metricId = 0;
+ int64_t bucketStartTimeNs = 10000000000;
+
+ unique_ptr<LogEvent> event =
+ CreateScreenStateChangedEvent(bucketStartTimeNs, android::view::DISPLAY_STATE_OFF);
+ vector<LogEvent> logEvents;
+ for (int j = 0; j < state.range(1); ++j) {
+ logEvents.push_back(*event.get());
+ }
+ sqlite3* dbHandle = getDb(key);
+ string err;
+ for (auto s : state) {
+ for (int metricId = 0; metricId < state.range(0); ++metricId) {
+ state.PauseTiming();
+ deleteTable(key, metricId);
+ createTableIfNeeded(key, metricId, *event.get());
+ state.ResumeTiming();
+ insert(key, metricId, logEvents, err);
+ }
+ }
+ closeDb(dbHandle);
+ deleteDb(key);
+}
+
+BENCHMARK(BM_insertAtomsIntoDbTablesReuseConnection)
+ ->Args({1, 10})
+ ->Args({1, 50})
+ ->Args({1, 100})
+ ->Args({1, 500})
+ ->Args({10, 10})
+ ->Args({10, 20});
+
+static void BM_createDbTables(benchmark::State& state) {
+ ConfigKey key = ConfigKey(111, 222);
+ int64_t metricId = 0;
+ int64_t bucketStartTimeNs = 10000000000;
+
+ unique_ptr<LogEvent> event =
+ CreateScreenStateChangedEvent(bucketStartTimeNs, android::view::DISPLAY_STATE_OFF);
+ vector<LogEvent> logEvents{*event.get()};
+ string err;
+ for (auto s : state) {
+ state.PauseTiming();
+ deleteTable(key, metricId);
+ state.ResumeTiming();
+ createTableIfNeeded(key, metricId, *event.get());
+ insert(key, metricId, logEvents, err);
+ }
+ deleteDb(key);
+}
+
+BENCHMARK(BM_createDbTables);
+} // namespace dbutils
+} // namespace statsd
+} // namespace os
+} // namespace android
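
Editor's note: the two insert benchmarks above differ only in whether the sqlite connection is reopened for every batch or kept open and reused. The following standalone sketch shows the reuse pattern with the raw sqlite3 C API; it does not use statsd's DbUtils wrappers, and the table layout is made up for illustration.

#include <sqlite3.h>

#include <cstdio>

int main() {
    sqlite3* db = nullptr;
    // Open one connection and reuse it for every insert, mirroring the
    // "ReuseConnection" benchmark; the "NewConnection" variant pays the
    // open/close (and table re-creation) cost on every iteration instead.
    if (sqlite3_open(":memory:", &db) != SQLITE_OK) {
        return 1;
    }
    char* err = nullptr;
    sqlite3_exec(db, "CREATE TABLE metric_0 (elapsed_ns INTEGER, value INTEGER);",
                 nullptr, nullptr, &err);
    for (int i = 0; i < 100; ++i) {
        char sql[128];
        std::snprintf(sql, sizeof(sql), "INSERT INTO metric_0 VALUES (%d, %d);", i, i * 2);
        sqlite3_exec(db, sql, nullptr, nullptr, &err);
    }
    std::printf("done\n");
    sqlite3_close(db);
    return 0;
}
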
diff --git a/statsd/benchmark/log_event_benchmark.cpp b/statsd/benchmark/log_event_benchmark.cpp
index 057e00b..419f323 100644
--- a/statsd/benchmark/log_event_benchmark.cpp
+++ b/statsd/benchmark/log_event_benchmark.cpp
@@ -22,11 +22,19 @@
namespace os {
namespace statsd {
-static size_t createAndParseStatsEvent(uint8_t* msg) {
+static void writeEventTestFields(AStatsEvent& event) {
+ AStatsEvent_writeInt64(&event, 3L);
+ AStatsEvent_writeInt32(&event, 2);
+ AStatsEvent_writeFloat(&event, 2.0);
+ AStatsEvent_writeString(&event, "DemoStringValue");
+}
+
+static size_t createStatsEvent(uint8_t* msg, int numElements = 1) {
AStatsEvent* event = AStatsEvent_obtain();
AStatsEvent_setAtomId(event, 100);
- AStatsEvent_writeInt32(event, 2);
- AStatsEvent_writeFloat(event, 2.0);
+ for (int i = 0; i < numElements; i++) {
+ writeEventTestFields(*event);
+ }
AStatsEvent_build(event);
size_t size;
@@ -35,9 +43,21 @@
return size;
}
+static size_t createStatsEventMedium(uint8_t* msg) {
+ return createStatsEvent(msg, 5);
+}
+
+static size_t createStatsEventLarge(uint8_t* msg) {
+ return createStatsEvent(msg, 10);
+}
+
+static size_t createStatsEventExtraLarge(uint8_t* msg) {
+ return createStatsEvent(msg, 40);
+}
+
static void BM_LogEventCreation(benchmark::State& state) {
uint8_t msg[LOGGER_ENTRY_MAX_PAYLOAD];
- size_t size = createAndParseStatsEvent(msg);
+ size_t size = createStatsEvent(msg);
while (state.KeepRunning()) {
LogEvent event(/*uid=*/ 1000, /*pid=*/ 1001);
benchmark::DoNotOptimize(event.parseBuffer(msg, size));
@@ -45,6 +65,147 @@
}
BENCHMARK(BM_LogEventCreation);
+static void BM_LogEventCreationWithPrefetch(benchmark::State& state) {
+ uint8_t msg[LOGGER_ENTRY_MAX_PAYLOAD];
+ size_t size = createStatsEvent(msg);
+ while (state.KeepRunning()) {
+ LogEvent event(/*uid=*/1000, /*pid=*/1001);
+
+ // explicitly parse header first
+ const LogEvent::BodyBufferInfo header = event.parseHeader(msg, size);
+
+ // explicitly parse body using the header
+ benchmark::DoNotOptimize(event.parseBody(header));
+ }
+}
+BENCHMARK(BM_LogEventCreationWithPrefetch);
+
+static void BM_LogEventCreationWithPrefetchOnly(benchmark::State& state) {
+ uint8_t msg[LOGGER_ENTRY_MAX_PAYLOAD];
+ size_t size = createStatsEvent(msg);
+ while (state.KeepRunning()) {
+ LogEvent event(/*uid=*/1000, /*pid=*/1001);
+
+ // explicitly parse header only and skip the body
+ benchmark::DoNotOptimize(event.parseHeader(msg, size));
+ }
+}
+BENCHMARK(BM_LogEventCreationWithPrefetchOnly);
+
+static void BM_LogEventCreationMedium(benchmark::State& state) {
+ uint8_t msg[LOGGER_ENTRY_MAX_PAYLOAD];
+ size_t size = createStatsEventMedium(msg);
+ while (state.KeepRunning()) {
+ LogEvent event(/*uid=*/1000, /*pid=*/1001);
+
+ benchmark::DoNotOptimize(event.parseBuffer(msg, size));
+ }
+}
+BENCHMARK(BM_LogEventCreationMedium);
+
+static void BM_LogEventCreationMediumWithPrefetch(benchmark::State& state) {
+ uint8_t msg[LOGGER_ENTRY_MAX_PAYLOAD];
+ size_t size = createStatsEventMedium(msg);
+ while (state.KeepRunning()) {
+ LogEvent event(/*uid=*/1000, /*pid=*/1001);
+
+ // explicitly parse header first
+ const LogEvent::BodyBufferInfo header = event.parseHeader(msg, size);
+
+ // explicitly parse body using the header
+ benchmark::DoNotOptimize(event.parseBody(header));
+ }
+}
+BENCHMARK(BM_LogEventCreationMediumWithPrefetch);
+
+static void BM_LogEventCreationMediumWithPrefetchOnly(benchmark::State& state) {
+ uint8_t msg[LOGGER_ENTRY_MAX_PAYLOAD];
+ size_t size = createStatsEventMedium(msg);
+ while (state.KeepRunning()) {
+ LogEvent event(/*uid=*/1000, /*pid=*/1001);
+
+ // explicitly parse header only and skip the body
+ benchmark::DoNotOptimize(event.parseHeader(msg, size));
+ }
+}
+BENCHMARK(BM_LogEventCreationMediumWithPrefetchOnly);
+
+static void BM_LogEventCreationLarge(benchmark::State& state) {
+ uint8_t msg[LOGGER_ENTRY_MAX_PAYLOAD];
+ size_t size = createStatsEventLarge(msg);
+ while (state.KeepRunning()) {
+ LogEvent event(/*uid=*/1000, /*pid=*/1001);
+
+ benchmark::DoNotOptimize(event.parseBuffer(msg, size));
+ }
+}
+BENCHMARK(BM_LogEventCreationLarge);
+
+static void BM_LogEventCreationLargeWithPrefetch(benchmark::State& state) {
+ uint8_t msg[LOGGER_ENTRY_MAX_PAYLOAD];
+ size_t size = createStatsEventLarge(msg);
+ while (state.KeepRunning()) {
+ LogEvent event(/*uid=*/1000, /*pid=*/1001);
+
+ // explicitly parse header first
+ const LogEvent::BodyBufferInfo header = event.parseHeader(msg, size);
+
+ // explicitly parse body using the header
+ benchmark::DoNotOptimize(event.parseBody(header));
+ }
+}
+BENCHMARK(BM_LogEventCreationLargeWithPrefetch);
+
+static void BM_LogEventCreationLargeWithPrefetchOnly(benchmark::State& state) {
+ uint8_t msg[LOGGER_ENTRY_MAX_PAYLOAD];
+ size_t size = createStatsEventLarge(msg);
+ while (state.KeepRunning()) {
+ LogEvent event(/*uid=*/1000, /*pid=*/1001);
+
+ // explicitly parse header only and skip the body
+ benchmark::DoNotOptimize(event.parseHeader(msg, size));
+ }
+}
+BENCHMARK(BM_LogEventCreationLargeWithPrefetchOnly);
+
+static void BM_LogEventCreationExtraLarge(benchmark::State& state) {
+ uint8_t msg[LOGGER_ENTRY_MAX_PAYLOAD];
+ size_t size = createStatsEventExtraLarge(msg);
+ while (state.KeepRunning()) {
+ LogEvent event(/*uid=*/1000, /*pid=*/1001);
+
+ benchmark::DoNotOptimize(event.parseBuffer(msg, size));
+ }
+}
+BENCHMARK(BM_LogEventCreationExtraLarge);
+
+static void BM_LogEventCreationExtraLargeWithPrefetch(benchmark::State& state) {
+ uint8_t msg[LOGGER_ENTRY_MAX_PAYLOAD];
+ size_t size = createStatsEventExtraLarge(msg);
+ while (state.KeepRunning()) {
+ LogEvent event(/*uid=*/1000, /*pid=*/1001);
+
+ // explicitly parse header first
+ const LogEvent::BodyBufferInfo header = event.parseHeader(msg, size);
+
+ // explicitly parse body using the header
+ benchmark::DoNotOptimize(event.parseBody(header));
+ }
+}
+BENCHMARK(BM_LogEventCreationExtraLargeWithPrefetch);
+
+static void BM_LogEventCreationExtraLargeWithPrefetchOnly(benchmark::State& state) {
+ uint8_t msg[LOGGER_ENTRY_MAX_PAYLOAD];
+ size_t size = createStatsEventExtraLarge(msg);
+ while (state.KeepRunning()) {
+ LogEvent event(/*uid=*/1000, /*pid=*/1001);
+
+ // explicitly parse header only and skip the body
+ benchmark::DoNotOptimize(event.parseHeader(msg, size));
+ }
+}
+BENCHMARK(BM_LogEventCreationExtraLargeWithPrefetchOnly);
+
} // namespace statsd
} // namespace os
} // namespace android
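
Editor's note: the "WithPrefetch" benchmarks above split parsing into parseHeader followed by parseBody, so the body can be skipped when no consumer needs the atom. A minimal standalone sketch of that two-phase idea follows; the wire format and types here are hypothetical, not statsd's LogEvent.

#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Hypothetical wire format for illustration: [4-byte atom id][payload bytes].
struct Header {
    int32_t atomId = 0;
    const uint8_t* body = nullptr;
    size_t bodySize = 0;
};

// Phase 1: cheap header parse, just enough to decide whether anyone wants the atom.
Header parseHeader(const uint8_t* buf, size_t size) {
    Header h;
    if (size >= sizeof(int32_t)) {
        std::memcpy(&h.atomId, buf, sizeof(int32_t));
        h.body = buf + sizeof(int32_t);
        h.bodySize = size - sizeof(int32_t);
    }
    return h;
}

// Phase 2: full body parse, done only when the atom is actually in use.
size_t parseBody(const Header& h) {
    size_t checksum = 0;
    for (size_t i = 0; i < h.bodySize; ++i) {
        checksum += h.body[i];
    }
    return checksum;
}

int main() {
    std::vector<uint8_t> msg(sizeof(int32_t) + 4);
    const int32_t atomId = 100;
    std::memcpy(msg.data(), &atomId, sizeof(atomId));
    msg[4] = 1; msg[5] = 2; msg[6] = 3; msg[7] = 4;

    Header h = parseHeader(msg.data(), msg.size());
    if (h.atomId == 100) {  // only pay for the body when the atom id is interesting
        std::cout << "body checksum: " << parseBody(h) << "\n";
    }
    return 0;
}
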
diff --git a/statsd/benchmark/log_event_filter_benchmark.cpp b/statsd/benchmark/log_event_filter_benchmark.cpp
new file mode 100644
index 0000000..b65e88f
--- /dev/null
+++ b/statsd/benchmark/log_event_filter_benchmark.cpp
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <random>
+#include <vector>
+
+#include "benchmark/benchmark.h"
+#include "socket/LogEventFilter.h"
+
+namespace android {
+namespace os {
+namespace statsd {
+
+namespace {
+
+constexpr int kAtomIdsCount = 500; // Filter size setup
+constexpr int kAtomIdsSampleCount = 3000; // Queries number
+
+std::vector<int> generateSampleAtomIdsList() {
+ std::vector<int> atomIds(kAtomIdsSampleCount);
+
+ std::default_random_engine generator;
+
+ // Get atom ids which are not in the filter to test behavior when the set is searched for
+ // an absent key.
+ // Expected atom ids are in the range 1..3000, random and evenly distributed.
+ std::uniform_int_distribution<int> distribution(1, kAtomIdsSampleCount);
+
+ for (int i = 0; i < kAtomIdsSampleCount; ++i) {
+ atomIds[i] = distribution(generator);
+ }
+
+ return atomIds;
+}
+
+template <typename T>
+T generateAtomIds() {
+ T atomIds;
+
+ std::default_random_engine generator;
+ std::uniform_int_distribution<int> distribution(1, kAtomIdsCount);
+
+ for (int i = 0; i < kAtomIdsCount; ++i) {
+ atomIds.insert(distribution(generator));
+ }
+
+ return atomIds;
+}
+
+// Used to set up the filter
+const std::set<int> kAtomIdsSet = generateAtomIds<std::set<int>>();
+const std::unordered_set<int> kAtomIdsUnorderedSet = generateAtomIds<std::unordered_set<int>>();
+
+const std::set<int> kAtomIdsSet2 = generateAtomIds<std::set<int>>();
+const std::unordered_set<int> kAtomIdsUnorderedSet2 = generateAtomIds<std::unordered_set<int>>();
+
+const std::set<int> kAtomIdsSet3 = generateAtomIds<std::set<int>>();
+const std::unordered_set<int> kAtomIdsUnorderedSet3 = generateAtomIds<std::unordered_set<int>>();
+
+const std::set<int> kAtomIdsSet4 = generateAtomIds<std::set<int>>();
+const std::unordered_set<int> kAtomIdsUnorderedSet4 = generateAtomIds<std::unordered_set<int>>();
+
+// Used to perform sample queries
+const std::vector<int> kSampleIdsList = generateSampleAtomIdsList();
+
+} // namespace
+
+static void BM_LogEventFilterUnorderedSet(benchmark::State& state) {
+ while (state.KeepRunning()) {
+ LogEventFilter eventFilter;
+ // populate
+ eventFilter.setAtomIds(kAtomIdsUnorderedSet, nullptr);
+ // many fetches
+ for (const auto& atomId : kSampleIdsList) {
+ benchmark::DoNotOptimize(eventFilter.isAtomInUse(atomId));
+ }
+ }
+}
+BENCHMARK(BM_LogEventFilterUnorderedSet);
+
+static void BM_LogEventFilterUnorderedSet2Consumers(benchmark::State& state) {
+ while (state.KeepRunning()) {
+ LogEventFilter eventFilter;
+ // populate
+ eventFilter.setAtomIds(kAtomIdsUnorderedSet, &kAtomIdsUnorderedSet);
+ eventFilter.setAtomIds(kAtomIdsUnorderedSet2, &kAtomIdsUnorderedSet2);
+ eventFilter.setAtomIds(kAtomIdsUnorderedSet3, &kAtomIdsUnorderedSet);
+ eventFilter.setAtomIds(kAtomIdsUnorderedSet4, &kAtomIdsUnorderedSet2);
+ // many fetches
+ for (const auto& atomId : kSampleIdsList) {
+ benchmark::DoNotOptimize(eventFilter.isAtomInUse(atomId));
+ }
+ }
+}
+BENCHMARK(BM_LogEventFilterUnorderedSet2Consumers);
+
+static void BM_LogEventFilterSet(benchmark::State& state) {
+ while (state.KeepRunning()) {
+ LogEventFilterGeneric<std::set<int>> eventFilter;
+ // populate
+ eventFilter.setAtomIds(kAtomIdsSet, nullptr);
+ // many fetches
+ for (const auto& atomId : kSampleIdsList) {
+ benchmark::DoNotOptimize(eventFilter.isAtomInUse(atomId));
+ }
+ }
+}
+BENCHMARK(BM_LogEventFilterSet);
+
+static void BM_LogEventFilterSet2Consumers(benchmark::State& state) {
+ while (state.KeepRunning()) {
+ LogEventFilterGeneric<std::set<int>> eventFilter;
+ // populate
+ eventFilter.setAtomIds(kAtomIdsSet, &kAtomIdsSet);
+ eventFilter.setAtomIds(kAtomIdsSet2, &kAtomIdsSet2);
+ eventFilter.setAtomIds(kAtomIdsSet3, &kAtomIdsSet);
+ eventFilter.setAtomIds(kAtomIdsSet4, &kAtomIdsSet2);
+ // many fetches
+ for (const auto& atomId : kSampleIdsList) {
+ benchmark::DoNotOptimize(eventFilter.isAtomInUse(atomId));
+ }
+ }
+}
+BENCHMARK(BM_LogEventFilterSet2Consumers);
+
+} // namespace statsd
+} // namespace os
+} // namespace android
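
Editor's note: the filter benchmarks above compare std::unordered_set and std::set as the backing container for a populate-once, query-many workload. As a rough standalone illustration of the same comparison (using std::chrono instead of Google Benchmark, with sizes mirroring kAtomIdsCount and kAtomIdsSampleCount), consider:

#include <chrono>
#include <iostream>
#include <set>
#include <unordered_set>
#include <vector>

// Times N membership checks against a container; the benchmark above performs
// the same comparison with the Google Benchmark harness.
template <typename Container>
long long timeLookupsUs(const Container& ids, const std::vector<int>& queries) {
    const auto start = std::chrono::steady_clock::now();
    long long hits = 0;
    for (int q : queries) {
        hits += ids.count(q);
    }
    const auto end = std::chrono::steady_clock::now();
    std::cout << "hits=" << hits << " ";
    return std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
}

int main() {
    std::set<int> ordered;
    std::unordered_set<int> unordered;
    for (int i = 0; i < 500; ++i) {  // filter size, as with kAtomIdsCount
        ordered.insert(i * 7 % 500);
        unordered.insert(i * 7 % 500);
    }
    std::vector<int> queries;
    for (int i = 0; i < 3000; ++i) {  // query count, as with kAtomIdsSampleCount
        queries.push_back(i);         // includes ids absent from the filter
    }

    std::cout << "std::set: " << timeLookupsUs(ordered, queries) << " us\n";
    std::cout << "std::unordered_set: " << timeLookupsUs(unordered, queries) << " us\n";
    return 0;
}
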
diff --git a/statsd/benchmark/metric_util.cpp b/statsd/benchmark/metric_util.cpp
index 89fd3d9..4f18298 100644
--- a/statsd/benchmark/metric_util.cpp
+++ b/statsd/benchmark/metric_util.cpp
@@ -354,10 +354,12 @@
sp<StatsPullerManager> pullerManager = new StatsPullerManager();
sp<AlarmMonitor> anomalyAlarmMonitor;
sp<AlarmMonitor> periodicAlarmMonitor;
- sp<StatsLogProcessor> processor =
- new StatsLogProcessor(uidMap, pullerManager, anomalyAlarmMonitor, periodicAlarmMonitor,
- timeBaseSec * NS_PER_SEC, [](const ConfigKey&) { return true; },
- [](const int&, const vector<int64_t>&) { return true; });
+ sp<StatsLogProcessor> processor = new StatsLogProcessor(
+ uidMap, pullerManager, anomalyAlarmMonitor, periodicAlarmMonitor,
+ timeBaseSec * NS_PER_SEC, [](const ConfigKey&) { return true; },
+ [](const int&, const vector<int64_t>&) { return true; },
+ [](const ConfigKey&, const string&, const vector<int64_t>&) {},
+ /*logEventFilter=*/nullptr);
processor->OnConfigUpdated(timeBaseSec * NS_PER_SEC, key, config);
return processor;
}
diff --git a/statsd/src/FieldValue.cpp b/statsd/src/FieldValue.cpp
index 406c19f..02a2c5e 100644
--- a/statsd/src/FieldValue.cpp
+++ b/statsd/src/FieldValue.cpp
@@ -16,8 +16,11 @@
#define STATSD_DEBUG false
#include "Log.h"
+
#include "FieldValue.h"
+
#include "HashableDimensionKey.h"
+#include "hash.h"
#include "math.h"
namespace android {
@@ -446,6 +449,26 @@
return size;
}
+std::string Annotations::toString() const {
+ std::string annotations;
+ if (isUidField()) {
+ annotations += "UID";
+ }
+ if (isPrimaryField()) {
+ annotations += annotations.size() > 0 ? ",PRIMARY" : "PRIMARY";
+ }
+ if (isExclusiveState()) {
+ annotations += annotations.size() > 0 ? ",EXCLUSIVE" : "EXCLUSIVE";
+ }
+ if (isNested()) {
+ annotations += annotations.size() > 0 ? ",NESTED" : "NESTED";
+ }
+ if (annotations.size()) {
+ annotations = "[" + annotations + "]";
+ }
+ return annotations;
+}
+
bool equalDimensions(const std::vector<Matcher>& dimension_a,
const std::vector<Matcher>& dimension_b) {
bool eq = dimension_a.size() == dimension_b.size();
@@ -521,6 +544,38 @@
return totalSize;
}
+bool shouldKeepSample(const FieldValue& sampleFieldValue, int shardOffset, int shardCount) {
+ int hashValue = 0;
+ switch (sampleFieldValue.mValue.type) {
+ case INT:
+ hashValue = Hash32(reinterpret_cast<const char*>(&sampleFieldValue.mValue.int_value),
+ sizeof(sampleFieldValue.mValue.int_value));
+ break;
+ case LONG:
+ hashValue = Hash32(reinterpret_cast<const char*>(&sampleFieldValue.mValue.long_value),
+ sizeof(sampleFieldValue.mValue.long_value));
+ break;
+ case FLOAT:
+ hashValue = Hash32(reinterpret_cast<const char*>(&sampleFieldValue.mValue.float_value),
+ sizeof(sampleFieldValue.mValue.float_value));
+ break;
+ case DOUBLE:
+ hashValue = Hash32(reinterpret_cast<const char*>(&sampleFieldValue.mValue.double_value),
+ sizeof(sampleFieldValue.mValue.double_value));
+ break;
+ case STRING:
+ hashValue = Hash32(sampleFieldValue.mValue.str_value);
+ break;
+ case STORAGE:
+ hashValue = Hash32((const char*)sampleFieldValue.mValue.storage_value.data(),
+ sampleFieldValue.mValue.storage_value.size());
+ break;
+ default:
+ return true;
+ }
+ return (hashValue + shardOffset) % shardCount == 0;
+}
+
} // namespace statsd
} // namespace os
} // namespace android
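
Editor's note: the new shouldKeepSample above hashes one field of the event and keeps the sample only when (hash + shardOffset) % shardCount == 0, so the same field value always lands in the same shard. A minimal sketch of that shard decision follows; the hash function here is a generic stand-in, not statsd's Hash32.

#include <cstdint>
#include <functional>
#include <iostream>
#include <string>

// Hypothetical stand-in for statsd's Hash32: any stable 32-bit hash works for
// illustrating the shard decision.
static uint32_t stableHash(const std::string& s) {
    return static_cast<uint32_t>(std::hash<std::string>{}(s));
}

// Keep the sample only if its hash lands in the shard selected by the offset.
static bool shouldKeepSampleSketch(const std::string& value, int shardOffset, int shardCount) {
    return (stableHash(value) + shardOffset) % shardCount == 0;
}

int main() {
    // With shardCount = 4, roughly one in four distinct values is kept, and a
    // given value always gets the same keep/drop decision.
    for (const std::string v : {"wifi", "cell", "bt", "usb"}) {
        std::cout << v << " -> "
                  << shouldKeepSampleSketch(v, /*shardOffset=*/1, /*shardCount=*/4) << "\n";
    }
    return 0;
}
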
diff --git a/statsd/src/FieldValue.h b/statsd/src/FieldValue.h
index 979264c..286005a 100644
--- a/statsd/src/FieldValue.h
+++ b/statsd/src/FieldValue.h
@@ -16,7 +16,6 @@
#pragma once
#include "src/statsd_config.pb.h"
-#include "annotations.h"
namespace android {
namespace os {
@@ -241,7 +240,7 @@
}
bool hasAllPositionMatcher() const {
- return mMatcher.getDepth() >= 1 && getRawMaskAtDepth(1) == 0x7f;
+ return mMatcher.getDepth() >= 1 && mMatcher.getRawPosAtDepth(1) == 0;
}
inline bool operator!=(const Matcher& that) const {
@@ -390,6 +389,8 @@
// Default value = false
inline bool isUidField() const { return getValueFromBitmask(UID_POS); }
+ std::string toString() const;
+
private:
inline void setBitmaskAtPos(int pos, bool value) {
mBooleanBitmask &= ~(1 << pos); // clear
@@ -465,6 +466,9 @@
// Estimate the memory size of the FieldValues. This is different from sizeof(FieldValue) because
// the size is computed at runtime using the actual contents stored in the FieldValue.
size_t getSize(const std::vector<FieldValue>& fieldValues);
+
+bool shouldKeepSample(const FieldValue& sampleFieldValue, int shardOffset, int shardCount);
+
} // namespace statsd
} // namespace os
} // namespace android
diff --git a/statsd/src/HashableDimensionKey.cpp b/statsd/src/HashableDimensionKey.cpp
index c1b250a..447a731 100644
--- a/statsd/src/HashableDimensionKey.cpp
+++ b/statsd/src/HashableDimensionKey.cpp
@@ -135,6 +135,16 @@
android::hash_type(fieldValue.mValue.float_value));
break;
}
+ case DOUBLE: {
+ hash = android::JenkinsHashMix(hash,
+ android::hash_type(fieldValue.mValue.double_value));
+ break;
+ }
+ case STORAGE: {
+ hash = android::JenkinsHashMixBytes(hash, fieldValue.mValue.storage_value.data(),
+ fieldValue.mValue.storage_value.size());
+ break;
+ }
default:
break;
}
@@ -144,6 +154,9 @@
bool filterValues(const Matcher& matcherField, const vector<FieldValue>& values,
FieldValue* output) {
+ if (matcherField.hasAllPositionMatcher()) {
+ return false;
+ }
for (const auto& value : values) {
if (value.mField.matches(matcherField)) {
(*output) = value;
diff --git a/statsd/src/HashableDimensionKey.h b/statsd/src/HashableDimensionKey.h
index 19a47df..b5df456 100644
--- a/statsd/src/HashableDimensionKey.h
+++ b/statsd/src/HashableDimensionKey.h
@@ -166,7 +166,8 @@
/**
* Returns true if a FieldValue field matches the matcher field.
- * The value of the FieldValue is output.
+ * This function can only be used to match one field (i.e. a matcher with position ALL will
+ * return false). The value of the matching FieldValue is written to output.
*/
bool filterValues(const Matcher& matcherField, const std::vector<FieldValue>& values,
FieldValue* output);
diff --git a/statsd/src/StatsLogProcessor.cpp b/statsd/src/StatsLogProcessor.cpp
index b90a5b7..85f0008 100644
--- a/statsd/src/StatsLogProcessor.cpp
+++ b/statsd/src/StatsLogProcessor.cpp
@@ -20,6 +20,7 @@
#include "StatsLogProcessor.h"
#include <android-base/file.h>
+#include <android-modules-utils/sdk_level.h>
#include <cutils/multiuser.h>
#include <src/active_config_list.pb.h>
#include <src/experiment_ids.pb.h>
@@ -39,6 +40,7 @@
using namespace android;
using android::base::StringPrintf;
+using android::modules::sdklevel::IsAtLeastU;
using android::util::FIELD_COUNT_REPEATED;
using android::util::FIELD_TYPE_BOOL;
using android::util::FIELD_TYPE_FLOAT;
@@ -53,6 +55,8 @@
namespace os {
namespace statsd {
+using aidl::android::os::IStatsQueryCallback;
+
// for ConfigMetricsReportList
const int FIELD_ID_CONFIG_KEY = 1;
const int FIELD_ID_REPORTS = 2;
@@ -84,25 +88,32 @@
// Cool down period for writing data to disk to avoid overwriting files.
#define WRITE_DATA_COOL_DOWN_SEC 15
-StatsLogProcessor::StatsLogProcessor(const sp<UidMap>& uidMap,
- const sp<StatsPullerManager>& pullerManager,
- const sp<AlarmMonitor>& anomalyAlarmMonitor,
- const sp<AlarmMonitor>& periodicAlarmMonitor,
- const int64_t timeBaseNs,
- const std::function<bool(const ConfigKey&)>& sendBroadcast,
- const std::function<bool(
- const int&, const vector<int64_t>&)>& activateBroadcast)
- : mUidMap(uidMap),
+StatsLogProcessor::StatsLogProcessor(
+ const sp<UidMap>& uidMap, const sp<StatsPullerManager>& pullerManager,
+ const sp<AlarmMonitor>& anomalyAlarmMonitor, const sp<AlarmMonitor>& periodicAlarmMonitor,
+ const int64_t timeBaseNs, const std::function<bool(const ConfigKey&)>& sendBroadcast,
+ const std::function<bool(const int&, const vector<int64_t>&)>& activateBroadcast,
+ const std::function<void(const ConfigKey&, const string&, const vector<int64_t>&)>&
+ sendRestrictedMetricsBroadcast,
+ const std::shared_ptr<LogEventFilter>& logEventFilter)
+ : mLastTtlTime(0),
+ mLastFlushRestrictedTime(0),
+ mLastDbGuardrailEnforcementTime(0),
+ mUidMap(uidMap),
mPullerManager(pullerManager),
mAnomalyAlarmMonitor(anomalyAlarmMonitor),
mPeriodicAlarmMonitor(periodicAlarmMonitor),
+ mLogEventFilter(logEventFilter),
mSendBroadcast(sendBroadcast),
mSendActivationBroadcast(activateBroadcast),
+ mSendRestrictedMetricsBroadcast(sendRestrictedMetricsBroadcast),
mTimeBaseNs(timeBaseNs),
mLargestTimestampSeen(0),
mLastTimestampSeen(0) {
mPullerManager->ForceClearPullerCache();
StateManager::getInstance().updateLogSources(uidMap);
+ // It is safe to call the locked version in the constructor - no concurrent access is possible
+ updateLogEventFilterLocked();
}
StatsLogProcessor::~StatsLogProcessor() {
@@ -123,15 +134,14 @@
void StatsLogProcessor::processFiredAnomalyAlarmsLocked(
const int64_t& timestampNs,
- unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>> alarmSet) {
+ unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>>& alarmSet) {
for (const auto& itr : mMetricsManagers) {
itr.second->onAnomalyAlarmFired(timestampNs, alarmSet);
}
}
void StatsLogProcessor::onPeriodicAlarmFired(
const int64_t& timestampNs,
- unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>> alarmSet) {
-
+ unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>>& alarmSet) {
std::lock_guard<std::mutex> lock(mMetricsMutex);
for (const auto& itr : mMetricsManagers) {
itr.second->onPeriodicAlarmFired(timestampNs, alarmSet);
@@ -267,17 +277,17 @@
int64_t firstId = trainInfo->experimentIds.at(0);
auto& ids = trainInfo->experimentIds;
switch (trainInfo->status) {
- case android::os::statsd::util::BINARY_PUSH_STATE_CHANGED__STATE__INSTALL_SUCCESS:
+ case util::BINARY_PUSH_STATE_CHANGED__STATE__INSTALL_SUCCESS:
if (find(ids.begin(), ids.end(), firstId + 1) == ids.end()) {
ids.push_back(firstId + 1);
}
break;
- case android::os::statsd::util::BINARY_PUSH_STATE_CHANGED__STATE__INSTALLER_ROLLBACK_INITIATED:
+ case util::BINARY_PUSH_STATE_CHANGED__STATE__INSTALLER_ROLLBACK_INITIATED:
if (find(ids.begin(), ids.end(), firstId + 2) == ids.end()) {
ids.push_back(firstId + 2);
}
break;
- case android::os::statsd::util::BINARY_PUSH_STATE_CHANGED__STATE__INSTALLER_ROLLBACK_SUCCESS:
+ case util::BINARY_PUSH_STATE_CHANGED__STATE__INSTALLER_ROLLBACK_SUCCESS:
if (find(ids.begin(), ids.end(), firstId + 3) == ids.end()) {
ids.push_back(firstId + 3);
}
@@ -346,13 +356,13 @@
int64_t firstId = trainInfoOnDisk.experimentIds[0];
auto& ids = trainInfoOnDisk.experimentIds;
switch (rollbackTypeIn) {
- case android::os::statsd::util::WATCHDOG_ROLLBACK_OCCURRED__ROLLBACK_TYPE__ROLLBACK_INITIATE:
+ case util::WATCHDOG_ROLLBACK_OCCURRED__ROLLBACK_TYPE__ROLLBACK_INITIATE:
if (find(ids.begin(), ids.end(), firstId + 4) == ids.end()) {
ids.push_back(firstId + 4);
}
StorageManager::writeTrainInfo(trainInfoOnDisk);
break;
- case android::os::statsd::util::WATCHDOG_ROLLBACK_OCCURRED__ROLLBACK_TYPE__ROLLBACK_SUCCESS:
+ case util::WATCHDOG_ROLLBACK_OCCURRED__ROLLBACK_TYPE__ROLLBACK_SUCCESS:
if (find(ids.begin(), ids.end(), firstId + 5) == ids.end()) {
ids.push_back(firstId + 5);
}
@@ -385,8 +395,9 @@
// Tell StatsdStats about new event
const int64_t eventElapsedTimeNs = event->GetElapsedTimestampNs();
- int atomId = event->GetTagId();
- StatsdStats::getInstance().noteAtomLogged(atomId, eventElapsedTimeNs / NS_PER_SEC);
+ const int atomId = event->GetTagId();
+ StatsdStats::getInstance().noteAtomLogged(atomId, eventElapsedTimeNs / NS_PER_SEC,
+ event->isParsedHeaderOnly());
if (!event->isValid()) {
StatsdStats::getInstance().noteAtomError(atomId);
return;
@@ -394,13 +405,13 @@
// Hard-coded logic to update train info on disk and fill in any information
// this log event may be missing.
- if (atomId == android::os::statsd::util::BINARY_PUSH_STATE_CHANGED) {
+ if (atomId == util::BINARY_PUSH_STATE_CHANGED) {
onBinaryPushStateChangedEventLocked(event);
}
// Hard-coded logic to update experiment ids on disk for certain rollback
// types and fill the rollback atom with experiment ids
- if (atomId == android::os::statsd::util::WATCHDOG_ROLLBACK_OCCURRED) {
+ if (atomId == util::WATCHDOG_ROLLBACK_OCCURRED) {
onWatchdogRollbackOccurredLocked(event);
}
@@ -411,7 +422,7 @@
// Hard-coded logic to update the isolated uid's in the uid-map.
// The field numbers need to be currently updated by hand with atoms.proto
- if (atomId == android::os::statsd::util::ISOLATED_UID_CHANGED) {
+ if (atomId == util::ISOLATED_UID_CHANGED) {
onIsolatedUidChangedEventLocked(*event);
} else {
// Map the isolated uid to host uid if necessary.
@@ -438,16 +449,24 @@
informAnomalyAlarmFiredLocked(NanoToMillis(elapsedRealtimeNs));
}
- int64_t curTimeSec = getElapsedRealtimeSec();
+ const int64_t curTimeSec = getElapsedRealtimeSec();
if (curTimeSec - mLastPullerCacheClearTimeSec > StatsdStats::kPullerCacheClearIntervalSec) {
mPullerManager->ClearPullerCacheIfNecessary(curTimeSec * NS_PER_SEC);
mLastPullerCacheClearTimeSec = curTimeSec;
}
+ flushRestrictedDataIfNecessaryLocked(elapsedRealtimeNs);
+ enforceDataTtlsIfNecessaryLocked(getWallClockNs(), elapsedRealtimeNs);
+ enforceDbGuardrailsIfNecessaryLocked(getWallClockNs(), elapsedRealtimeNs);
+
std::unordered_set<int> uidsWithActiveConfigsChanged;
std::unordered_map<int, std::vector<int64_t>> activeConfigsPerUid;
+
// pass the event to metrics managers.
for (auto& pair : mMetricsManagers) {
+ if (event->isRestricted() && !pair.second->hasRestrictedMetricsDelegate()) {
+ continue;
+ }
int uid = pair.first.GetUid();
int64_t configId = pair.first.GetId();
bool isPrevActive = pair.second->isActive();
@@ -531,9 +550,21 @@
void StatsLogProcessor::OnConfigUpdatedLocked(const int64_t timestampNs, const ConfigKey& key,
const StatsdConfig& config, bool modularUpdate) {
VLOG("Updated configuration for key %s", key.ToString().c_str());
- // Create new config if this is not a modular update or if this is a new config.
const auto& it = mMetricsManagers.find(key);
bool configValid = false;
+ if (IsAtLeastU() && it != mMetricsManagers.end()) {
+ if (it->second->hasRestrictedMetricsDelegate() !=
+ config.has_restricted_metrics_delegate_package_name()) {
+ // Not a modular update if has_restricted_metrics_delegate changes
+ modularUpdate = false;
+ }
+ if (!modularUpdate && it->second->hasRestrictedMetricsDelegate()) {
+ // Always delete the old db if restricted metrics config is not a
+ // modular update.
+ dbutils::deleteDb(key);
+ }
+ }
+ // Create new config if this is not a modular update or if this is a new config.
if (!modularUpdate || it == mMetricsManagers.end()) {
sp<MetricsManager> newMetricsManager =
new MetricsManager(key, config, mTimeBaseNs, timestampNs, mUidMap, mPullerManager,
@@ -541,8 +572,23 @@
configValid = newMetricsManager->isConfigValid();
if (configValid) {
newMetricsManager->init();
- mUidMap->OnConfigUpdated(key);
newMetricsManager->refreshTtl(timestampNs);
+ // The sdk check for U+ is unnecessary because a config with a restricted metrics
+ // delegate will be invalid on non-U+ devices.
+ if (newMetricsManager->hasRestrictedMetricsDelegate()) {
+ mSendRestrictedMetricsBroadcast(key,
+ newMetricsManager->getRestrictedMetricsDelegate(),
+ newMetricsManager->getAllMetricIds());
+ string err;
+ if (!dbutils::updateDeviceInfoTable(key, err)) {
+ ALOGE("Failed to create device_info table for configKey %s, err: %s",
+ key.ToString().c_str(), err.c_str());
+ StatsdStats::getInstance().noteDeviceInfoTableCreationFailed(key);
+ }
+ } else if (it != mMetricsManagers.end() && it->second->hasRestrictedMetricsDelegate()) {
+ mSendRestrictedMetricsBroadcast(key, it->second->getRestrictedMetricsDelegate(),
+ {});
+ }
mMetricsManagers[key] = newMetricsManager;
VLOG("StatsdConfig valid");
}
@@ -550,16 +596,34 @@
// Preserve the existing MetricsManager, update necessary components and metadata in place.
configValid = it->second->updateConfig(config, mTimeBaseNs, timestampNs,
mAnomalyAlarmMonitor, mPeriodicAlarmMonitor);
- if (configValid) {
- mUidMap->OnConfigUpdated(key);
+ if (configValid && it->second->hasRestrictedMetricsDelegate()) {
+ mSendRestrictedMetricsBroadcast(key, it->second->getRestrictedMetricsDelegate(),
+ it->second->getAllMetricIds());
}
}
+
+ if (configValid && !config.has_restricted_metrics_delegate_package_name()) {
+ // We do not need to track uid map changes for restricted metrics since the uidmap is not
+ // stored in the sqlite db.
+ mUidMap->OnConfigUpdated(key);
+ } else if (configValid && config.has_restricted_metrics_delegate_package_name()) {
+ mUidMap->OnConfigRemoved(key);
+ }
if (!configValid) {
// If there is any error in the config, don't use it.
// Remove any existing config with the same key.
ALOGE("StatsdConfig NOT valid");
+ // Send an empty restricted metrics broadcast if the previous config was restricted.
+ if (IsAtLeastU() && it != mMetricsManagers.end() &&
+ it->second->hasRestrictedMetricsDelegate()) {
+ mSendRestrictedMetricsBroadcast(key, it->second->getRestrictedMetricsDelegate(), {});
+ dbutils::deleteDb(key);
+ }
mMetricsManagers.erase(key);
+ mUidMap->OnConfigRemoved(key);
}
+
+ updateLogEventFilterLocked();
}
size_t StatsLogProcessor::GetMetricsSize(const ConfigKey& key) const {
@@ -596,6 +660,12 @@
const DumpLatency dumpLatency, ProtoOutputStream* proto) {
std::lock_guard<std::mutex> lock(mMetricsMutex);
+ auto it = mMetricsManagers.find(key);
+ if (it != mMetricsManagers.end() && it->second->hasRestrictedMetricsDelegate()) {
+ VLOG("Unexpected call to StatsLogProcessor::onDumpReport for restricted metrics.");
+ return;
+ }
+
// Start of ConfigKey.
uint64_t configKeyToken = proto->start(FIELD_TYPE_MESSAGE | FIELD_ID_CONFIG_KEY);
proto->write(FIELD_TYPE_INT32 | FIELD_ID_UID, key.GetUid());
@@ -604,7 +674,6 @@
// End of ConfigKey.
bool keepFile = false;
- auto it = mMetricsManagers.find(key);
if (it != mMetricsManagers.end() && it->second->shouldPersistLocalHistory()) {
keepFile = true;
}
@@ -678,6 +747,12 @@
if (it == mMetricsManagers.end()) {
return;
}
+ if (it->second->hasRestrictedMetricsDelegate()) {
+ VLOG("Unexpected call to StatsLogProcessor::onConfigMetricsReportLocked for restricted "
+ "metrics.");
+ // Do not call onDumpReport for restricted metrics.
+ return;
+ }
int64_t lastReportTimeNs = it->second->getLastReportTimeNs();
int64_t lastReportWallClockNs = it->second->getLastReportWallClockNs();
@@ -765,6 +840,10 @@
if (it != mMetricsManagers.end()) {
WriteDataToDiskLocked(key, getElapsedRealtimeNs(), getWallClockNs(), CONFIG_REMOVED,
NO_TIME_CONSTRAINTS);
+ if (IsAtLeastU() && it->second->hasRestrictedMetricsDelegate()) {
+ dbutils::deleteDb(key);
+ mSendRestrictedMetricsBroadcast(key, it->second->getRestrictedMetricsDelegate(), {});
+ }
mMetricsManagers.erase(it);
mUidMap->OnConfigRemoved(key);
}
@@ -787,6 +866,221 @@
if (mMetricsManagers.empty()) {
mPullerManager->ForceClearPullerCache();
}
+
+ updateLogEventFilterLocked();
+}
+
+// TODO(b/267501143): Add unit tests when metric producer is ready
+void StatsLogProcessor::enforceDataTtlsIfNecessaryLocked(const int64_t wallClockNs,
+ const int64_t elapsedRealtimeNs) {
+ if (!IsAtLeastU()) {
+ return;
+ }
+ if (elapsedRealtimeNs - mLastTtlTime < StatsdStats::kMinTtlCheckPeriodNs) {
+ return;
+ }
+ enforceDataTtlsLocked(wallClockNs, elapsedRealtimeNs);
+}
+
+void StatsLogProcessor::flushRestrictedDataIfNecessaryLocked(const int64_t elapsedRealtimeNs) {
+ if (!IsAtLeastU()) {
+ return;
+ }
+ if (elapsedRealtimeNs - mLastFlushRestrictedTime < StatsdStats::kMinFlushRestrictedPeriodNs) {
+ return;
+ }
+ flushRestrictedDataLocked(elapsedRealtimeNs);
+}
+
+void StatsLogProcessor::querySql(const string& sqlQuery, const int32_t minSqlClientVersion,
+ const optional<vector<uint8_t>>& policyConfig,
+ const shared_ptr<IStatsQueryCallback>& callback,
+ const int64_t configId, const string& configPackage,
+ const int32_t callingUid) {
+ std::lock_guard<std::mutex> lock(mMetricsMutex);
+ string err = "";
+
+ if (!IsAtLeastU()) {
+ ALOGW("Restricted metrics query invoked on U- device");
+ StatsdStats::getInstance().noteQueryRestrictedMetricFailed(
+ configId, configPackage, std::nullopt, callingUid,
+ InvalidQueryReason(FLAG_DISABLED));
+ return;
+ }
+
+ const int64_t elapsedRealtimeNs = getElapsedRealtimeNs();
+
+ // TODO(b/268416460): validate policyConfig here
+
+ if (minSqlClientVersion > dbutils::getDbVersion()) {
+ callback->sendFailure(StringPrintf(
+ "Unsupported sqlite version. Installed Version: %d, Requested Version: %d.",
+ dbutils::getDbVersion(), minSqlClientVersion));
+ StatsdStats::getInstance().noteQueryRestrictedMetricFailed(
+ configId, configPackage, std::nullopt, callingUid,
+ InvalidQueryReason(UNSUPPORTED_SQLITE_VERSION));
+ return;
+ }
+
+ set<int32_t> configPackageUids;
+ const auto& uidMapItr = UidMap::sAidToUidMapping.find(configPackage);
+ if (uidMapItr != UidMap::sAidToUidMapping.end()) {
+ configPackageUids.insert(uidMapItr->second);
+ } else {
+ configPackageUids = mUidMap->getAppUid(configPackage);
+ }
+
+ InvalidQueryReason invalidQueryReason;
+ set<ConfigKey> keysToQuery = getRestrictedConfigKeysToQueryLocked(
+ callingUid, configId, configPackageUids, err, invalidQueryReason);
+
+ if (keysToQuery.empty()) {
+ callback->sendFailure(err);
+ StatsdStats::getInstance().noteQueryRestrictedMetricFailed(
+ configId, configPackage, std::nullopt, callingUid,
+ InvalidQueryReason(invalidQueryReason));
+ return;
+ }
+
+ if (keysToQuery.size() > 1) {
+ err = "Ambiguous ConfigKey";
+ callback->sendFailure(err);
+ StatsdStats::getInstance().noteQueryRestrictedMetricFailed(
+ configId, configPackage, std::nullopt, callingUid,
+ InvalidQueryReason(AMBIGUOUS_CONFIG_KEY));
+ return;
+ }
+
+ flushRestrictedDataLocked(elapsedRealtimeNs);
+ enforceDataTtlsLocked(getWallClockNs(), elapsedRealtimeNs);
+
+ std::vector<std::vector<std::string>> rows;
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ if (!dbutils::query(*(keysToQuery.begin()), sqlQuery, rows, columnTypes, columnNames, err)) {
+ callback->sendFailure(StringPrintf("failed to query db %s:", err.c_str()));
+ StatsdStats::getInstance().noteQueryRestrictedMetricFailed(
+ configId, configPackage, keysToQuery.begin()->GetUid(), callingUid,
+ InvalidQueryReason(QUERY_FAILURE), err.c_str());
+ return;
+ }
+
+ vector<string> queryData;
+ queryData.reserve(rows.size() * columnNames.size());
+ // TODO(b/268415904): avoid this vector transformation.
+ if (columnNames.size() != columnTypes.size()) {
+ callback->sendFailure("Inconsistent number of column names and column types");
+ StatsdStats::getInstance().noteQueryRestrictedMetricFailed(
+ configId, configPackage, keysToQuery.begin()->GetUid(), callingUid,
+ InvalidQueryReason(INCONSISTENT_ROW_SIZE));
+ return;
+ }
+ for (size_t i = 0; i < rows.size(); ++i) {
+ if (rows[i].size() != columnNames.size()) {
+ callback->sendFailure("Inconsistent row sizes");
+ StatsdStats::getInstance().noteQueryRestrictedMetricFailed(
+ configId, configPackage, keysToQuery.begin()->GetUid(), callingUid,
+ InvalidQueryReason(INCONSISTENT_ROW_SIZE));
+ return;
+ }
+ queryData.insert(std::end(queryData), std::make_move_iterator(std::begin(rows[i])),
+ std::make_move_iterator(std::end(rows[i])));
+ }
+ callback->sendResults(queryData, columnNames, columnTypes, rows.size());
+ StatsdStats::getInstance().noteQueryRestrictedMetricSucceed(
+ configId, configPackage, keysToQuery.begin()->GetUid(), callingUid,
+ /*queryLatencyNs=*/getElapsedRealtimeNs() - elapsedRealtimeNs);
+}
+
+set<ConfigKey> StatsLogProcessor::getRestrictedConfigKeysToQueryLocked(
+ const int32_t callingUid, const int64_t configId, const set<int32_t>& configPackageUids,
+ string& err, InvalidQueryReason& invalidQueryReason) {
+ set<ConfigKey> matchedConfigKeys;
+ for (auto uid : configPackageUids) {
+ ConfigKey configKey(uid, configId);
+ if (mMetricsManagers.find(configKey) != mMetricsManagers.end()) {
+ matchedConfigKeys.insert(configKey);
+ }
+ }
+
+ set<ConfigKey> excludedKeys;
+ for (auto& configKey : matchedConfigKeys) {
+ auto it = mMetricsManagers.find(configKey);
+ if (!it->second->validateRestrictedMetricsDelegate(callingUid)) {
+ excludedKeys.insert(configKey);
+ };
+ }
+
+ set<ConfigKey> result;
+ std::set_difference(matchedConfigKeys.begin(), matchedConfigKeys.end(), excludedKeys.begin(),
+ excludedKeys.end(), std::inserter(result, result.end()));
+ if (matchedConfigKeys.empty()) {
+ err = "No configs found matching the config key";
+ invalidQueryReason = InvalidQueryReason(CONFIG_KEY_NOT_FOUND);
+ } else if (result.empty()) {
+ err = "No matching configs for restricted metrics delegate";
+ invalidQueryReason = InvalidQueryReason(CONFIG_KEY_WITH_UNMATCHED_DELEGATE);
+ }
+
+ return result;
+}
+
+void StatsLogProcessor::EnforceDataTtls(const int64_t wallClockNs,
+ const int64_t elapsedRealtimeNs) {
+ if (!IsAtLeastU()) {
+ return;
+ }
+ std::lock_guard<std::mutex> lock(mMetricsMutex);
+ enforceDataTtlsLocked(wallClockNs, elapsedRealtimeNs);
+}
+
+void StatsLogProcessor::enforceDataTtlsLocked(const int64_t wallClockNs,
+ const int64_t elapsedRealtimeNs) {
+ for (const auto& itr : mMetricsManagers) {
+ itr.second->enforceRestrictedDataTtls(wallClockNs);
+ }
+ mLastTtlTime = elapsedRealtimeNs;
+}
+
+void StatsLogProcessor::enforceDbGuardrailsIfNecessaryLocked(const int64_t wallClockNs,
+ const int64_t elapsedRealtimeNs) {
+ if (elapsedRealtimeNs - mLastDbGuardrailEnforcementTime <
+ StatsdStats::kMinDbGuardrailEnforcementPeriodNs) {
+ return;
+ }
+ StorageManager::enforceDbGuardrails(STATS_RESTRICTED_DATA_DIR, wallClockNs / NS_PER_SEC,
+ StatsdStats::kMaxFileSize);
+ mLastDbGuardrailEnforcementTime = elapsedRealtimeNs;
+}
+
+void StatsLogProcessor::fillRestrictedMetrics(const int64_t configId, const string& configPackage,
+ const int32_t delegateUid, vector<int64_t>* output) {
+ std::lock_guard<std::mutex> lock(mMetricsMutex);
+
+ set<int32_t> configPackageUids;
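+    // Resolve the config package to uids: check the static AID-to-uid mapping first,
+    // then fall back to the dynamic uid map.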
+ const auto& uidMapItr = UidMap::sAidToUidMapping.find(configPackage);
+ if (uidMapItr != UidMap::sAidToUidMapping.end()) {
+ configPackageUids.insert(uidMapItr->second);
+ } else {
+ configPackageUids = mUidMap->getAppUid(configPackage);
+ }
+ string err;
+ InvalidQueryReason invalidQueryReason;
+ set<ConfigKey> keysToGetMetrics = getRestrictedConfigKeysToQueryLocked(
+ delegateUid, configId, configPackageUids, err, invalidQueryReason);
+
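+    // Errors from the lookup are ignored here; an empty key set simply yields no metric ids.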
+ for (const ConfigKey& key : keysToGetMetrics) {
+ vector<int64_t> metricIds = mMetricsManagers[key]->getAllMetricIds();
+ output->insert(output->end(), metricIds.begin(), metricIds.end());
+ }
+}
+
+void StatsLogProcessor::flushRestrictedDataLocked(const int64_t elapsedRealtimeNs) {
+ for (const auto& it : mMetricsManagers) {
+ // no-op if metricsManager is not restricted
+ it.second->flushRestrictedData();
+ }
+
+ mLastFlushRestrictedTime = elapsedRealtimeNs;
}
void StatsLogProcessor::flushIfNecessaryLocked(const ConfigKey& key,
@@ -801,22 +1095,31 @@
// We suspect that the byteSize() computation is expensive, so we set a rate limit.
size_t totalBytes = metricsManager.byteSize();
+
mLastByteSizeTimes[key] = elapsedRealtimeNs;
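+        // Restricted configs flush to the local database rather than broadcasting, so they
+        // use a separate byte threshold.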
+ const size_t kBytesPerConfig = metricsManager.hasRestrictedMetricsDelegate()
+ ? StatsdStats::kBytesPerRestrictedConfigTriggerFlush
+ : StatsdStats::kBytesPerConfigTriggerGetData;
bool requestDump = false;
if (totalBytes > StatsdStats::kMaxMetricsBytesPerConfig) {
// Too late. We need to start clearing data.
metricsManager.dropData(elapsedRealtimeNs);
StatsdStats::getInstance().noteDataDropped(key, totalBytes);
VLOG("StatsD had to toss out metrics for %s", key.ToString().c_str());
- } else if ((totalBytes > StatsdStats::kBytesPerConfigTriggerGetData) ||
+ } else if ((totalBytes > kBytesPerConfig) ||
(mOnDiskDataConfigs.find(key) != mOnDiskDataConfigs.end())) {
- // Request to send a broadcast if:
+ // Request to dump if:
// 1. in memory data > threshold OR
// 2. config has old data report on disk.
requestDump = true;
}
if (requestDump) {
+ if (metricsManager.hasRestrictedMetricsDelegate()) {
+ metricsManager.flushRestrictedData();
+ // No need to send broadcast for restricted metrics.
+ return;
+ }
// Send broadcast so that receivers can pull data.
auto lastBroadcastTime = mLastBroadcastTimes.find(key);
if (lastBroadcastTime != mLastBroadcastTimes.end()) {
@@ -843,6 +1146,10 @@
!mMetricsManagers.find(key)->second->shouldWriteToDisk()) {
return;
}
+ if (mMetricsManagers.find(key)->second->hasRestrictedMetricsDelegate()) {
+ mMetricsManagers.find(key)->second->flushRestrictedData();
+ return;
+ }
vector<uint8_t> buffer;
onConfigMetricsReportLocked(key, timestampNs, wallClockNs,
true /* include_current_partial_bucket*/, true /* erase_data */,
@@ -1149,7 +1456,7 @@
void StatsLogProcessor::informAnomalyAlarmFiredLocked(const int64_t elapsedTimeMillis) {
VLOG("StatsService::informAlarmForSubscriberTriggeringFired was called");
- std::unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>> alarmSet =
+ unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>> alarmSet =
mAnomalyAlarmMonitor->popSoonerThan(static_cast<uint32_t>(elapsedTimeMillis / 1000));
if (alarmSet.size() > 0) {
VLOG("Found periodic alarm fired.");
@@ -1159,6 +1466,31 @@
}
}
+LogEventFilter::AtomIdSet StatsLogProcessor::getDefaultAtomIdSet() {
+    // Populate the hard-coded list of useful atoms.
+    // Atoms that could be pushed by statsd itself (APP_BREADCRUMB_REPORTED & ANOMALY_DETECTED)
+    // are also added to simplify the logic that handles metric config updates.
+ LogEventFilter::AtomIdSet allAtomIds{
+ util::BINARY_PUSH_STATE_CHANGED, util::DAVEY_OCCURRED,
+ util::ISOLATED_UID_CHANGED, util::APP_BREADCRUMB_REPORTED,
+ util::WATCHDOG_ROLLBACK_OCCURRED, util::ANOMALY_DETECTED};
+ return allAtomIds;
+}
+
+void StatsLogProcessor::updateLogEventFilterLocked() const {
+ VLOG("StatsLogProcessor: Updating allAtomIds");
+ if (!mLogEventFilter) {
+ return;
+ }
+ LogEventFilter::AtomIdSet allAtomIds = getDefaultAtomIdSet();
+ for (const auto& metricsManager : mMetricsManagers) {
+ metricsManager.second->addAllAtomIds(allAtomIds);
+ }
+ StateManager::getInstance().addAllAtomIds(allAtomIds);
+ VLOG("StatsLogProcessor: Updating allAtomIds done. Total atoms %d", (int)allAtomIds.size());
+ mLogEventFilter->setAtomIds(std::move(allAtomIds), this);
+}
+
} // namespace statsd
} // namespace os
} // namespace android
diff --git a/statsd/src/StatsLogProcessor.h b/statsd/src/StatsLogProcessor.h
index 231b411..4841263 100644
--- a/statsd/src/StatsLogProcessor.h
+++ b/statsd/src/StatsLogProcessor.h
@@ -16,33 +16,37 @@
#pragma once
+#include <aidl/android/os/BnStatsd.h>
#include <gtest/gtest_prod.h>
+#include <stdio.h>
+
+#include <unordered_map>
+
#include "config/ConfigListener.h"
+#include "external/StatsPullerManager.h"
#include "logd/LogEvent.h"
#include "metrics/MetricsManager.h"
#include "packages/UidMap.h"
-#include "external/StatsPullerManager.h"
-
+#include "socket/LogEventFilter.h"
#include "src/statsd_config.pb.h"
#include "src/statsd_metadata.pb.h"
-#include <stdio.h>
-#include <unordered_map>
-
namespace android {
namespace os {
namespace statsd {
-
class StatsLogProcessor : public ConfigListener, public virtual PackageInfoListener {
public:
- StatsLogProcessor(const sp<UidMap>& uidMap, const sp<StatsPullerManager>& pullerManager,
- const sp<AlarmMonitor>& anomalyAlarmMonitor,
- const sp<AlarmMonitor>& subscriberTriggerAlarmMonitor,
- const int64_t timeBaseNs,
- const std::function<bool(const ConfigKey&)>& sendBroadcast,
- const std::function<bool(const int&,
- const vector<int64_t>&)>& sendActivationBroadcast);
+ StatsLogProcessor(
+ const sp<UidMap>& uidMap, const sp<StatsPullerManager>& pullerManager,
+ const sp<AlarmMonitor>& anomalyAlarmMonitor,
+ const sp<AlarmMonitor>& subscriberTriggerAlarmMonitor, const int64_t timeBaseNs,
+ const std::function<bool(const ConfigKey&)>& sendBroadcast,
+ const std::function<bool(const int&, const vector<int64_t>&)>& sendActivationBroadcast,
+ const std::function<void(const ConfigKey&, const string&, const vector<int64_t>&)>&
+ sendRestrictedMetricsBroadcast,
+ const std::shared_ptr<LogEventFilter>& logEventFilter);
+
virtual ~StatsLogProcessor();
void OnLogEvent(LogEvent* event);
@@ -75,7 +79,7 @@
/* Tells MetricsManager that the alarms in alarmSet have fired. Modifies periodic alarmSet. */
void onPeriodicAlarmFired(
const int64_t& timestampNs,
- unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>> alarmSet);
+ unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>>& alarmSet);
/* Flushes data to disk. Data on memory will be gone after written to disk. */
void WriteDataToDisk(const DumpReportReason dumpReportReason, const DumpLatency dumpLatency,
@@ -108,6 +112,9 @@
int64_t currentWallClockTimeNs,
int64_t systemElapsedTimeNs);
+ /* Enforces ttls for restricted metrics */
+ void EnforceDataTtls(const int64_t wallClockNs, const int64_t elapsedRealtimeNs);
+
/* Sets the active status/ttl for all configs and metrics to the status in ActiveConfigList. */
void SetConfigsActiveState(const ActiveConfigList& activeConfigList, int64_t currentTimeNs);
@@ -142,6 +149,12 @@
inline void setPrintLogs(bool enabled) {
std::lock_guard<std::mutex> lock(mMetricsMutex);
mPrintAllLogs = enabled;
+
+ if (mLogEventFilter) {
+ // Turning on print logs turns off pushed event filtering to enforce
+ // complete log event buffer parsing
+ mLogEventFilter->setFilteringEnabled(!enabled);
+ }
}
// Add a specific config key to the possible configs to dump ASAP.
@@ -151,6 +164,17 @@
void cancelAnomalyAlarm();
+ void querySql(const string& sqlQuery, const int32_t minSqlClientVersion,
+ const optional<vector<uint8_t>>& policyConfig,
+ const shared_ptr<aidl::android::os::IStatsQueryCallback>& callback,
+ const int64_t configId, const string& configPackage, const int32_t callingUid);
+
+ void fillRestrictedMetrics(const int64_t configId, const string& configPackage,
+ const int32_t delegateUid, vector<int64_t>* output);
+
+    /* Returns the pre-defined list of atoms to be parsed by LogEventFilter */
+ static LogEventFilter::AtomIdSet getDefaultAtomIdSet();
+
private:
// For testing only.
inline sp<AlarmMonitor> getAnomalyAlarmMonitor() const {
@@ -178,6 +202,15 @@
// Tracks when we last checked the bytes consumed for each config key.
std::unordered_map<ConfigKey, int64_t> mLastByteSizeTimes;
+ // Tracks when we last checked the ttl for restricted metrics.
+ int64_t mLastTtlTime;
+
+ // Tracks when we last flushed restricted metrics.
+ int64_t mLastFlushRestrictedTime;
+
+ // Tracks when we last checked db guardrails.
+ int64_t mLastDbGuardrailEnforcementTime;
+
// Tracks which config keys has metric reports on disk
std::set<ConfigKey> mOnDiskDataConfigs;
@@ -189,6 +222,8 @@
sp<AlarmMonitor> mPeriodicAlarmMonitor;
+ std::shared_ptr<LogEventFilter> mLogEventFilter;
+
void OnLogEvent(LogEvent* event, int64_t elapsedRealtimeNs);
void resetIfConfigTtlExpiredLocked(const int64_t eventTimeNs);
@@ -228,10 +263,28 @@
(e.g., before reboot). So no need to further persist local history.*/
const bool dataSavedToDisk, vector<uint8_t>* proto);
+    /* Check if it is time to enforce data ttls for restricted metrics, and if it is, enforce
+     * ttls on all restricted metrics. */
+ void enforceDataTtlsIfNecessaryLocked(const int64_t wallClockNs,
+ const int64_t elapsedRealtimeNs);
+
+ // Enforces ttls on all restricted metrics.
+ void enforceDataTtlsLocked(const int64_t wallClockNs, const int64_t elapsedRealtimeNs);
+
+ // Enforces that dbs are within guardrail parameters.
+ void enforceDbGuardrailsIfNecessaryLocked(const int64_t wallClockNs,
+ const int64_t elapsedRealtimeNs);
+
/* Check if we should send a broadcast if approaching memory limits and if we're over, we
* actually delete the data. */
void flushIfNecessaryLocked(const ConfigKey& key, MetricsManager& metricsManager);
+ set<ConfigKey> getRestrictedConfigKeysToQueryLocked(const int32_t callingUid,
+ const int64_t configId,
+ const set<int32_t>& configPackageUids,
+ string& err,
+ InvalidQueryReason& invalidQueryReason);
+
// Maps the isolated uid in the log event to host uid if the log event contains uid fields.
void mapIsolatedUidToHostUidIfNecessaryLocked(LogEvent* event) const;
@@ -266,7 +319,14 @@
/* Tells MetricsManager that the alarms in alarmSet have fired. Modifies anomaly alarmSet. */
void processFiredAnomalyAlarmsLocked(
const int64_t& timestampNs,
- unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>> alarmSet);
+ unordered_set<sp<const InternalAlarm>, SpHash<InternalAlarm>>& alarmSet);
+
+ void flushRestrictedDataLocked(const int64_t elapsedRealtimeNs);
+
+ void flushRestrictedDataIfNecessaryLocked(const int64_t elapsedRealtimeNs);
+
+ /* Tells LogEventFilter about atom ids to parse */
+ void updateLogEventFilterLocked() const;
// Function used to send a broadcast so that receiver for the config key can call getData
// to retrieve the stored data.
@@ -276,6 +336,12 @@
// are currently active.
std::function<bool(const int& uid, const vector<int64_t>& configIds)> mSendActivationBroadcast;
+ // Function used to send a broadcast if necessary so the receiver can be notified of the
+ // restricted metrics for the given config.
+ std::function<void(const ConfigKey& key, const string& delegatePackage,
+ const vector<int64_t>& restrictedMetricIds)>
+ mSendRestrictedMetricsBroadcast;
+
const int64_t mTimeBaseNs;
// Largest timestamp of the events that we have processed.
@@ -299,6 +365,7 @@
bool mPrintAllLogs = false;
+ friend class StatsLogProcessorTestRestricted;
FRIEND_TEST(StatsLogProcessorTest, TestOutOfOrderLogs);
FRIEND_TEST(StatsLogProcessorTest, TestRateLimitByteSize);
FRIEND_TEST(StatsLogProcessorTest, TestRateLimitBroadcast);
@@ -310,7 +377,22 @@
FRIEND_TEST(StatsLogProcessorTest,
TestActivationOnBootMultipleActivationsDifferentActivationTypes);
FRIEND_TEST(StatsLogProcessorTest, TestActivationsPersistAcrossSystemServerRestart);
-
+ FRIEND_TEST(StatsLogProcessorTest, LogEventFilterOnSetPrintLogs);
+ FRIEND_TEST(StatsLogProcessorTest, TestUidMapHasSnapshot);
+ FRIEND_TEST(StatsLogProcessorTest, TestEmptyConfigHasNoUidMap);
+ FRIEND_TEST(StatsLogProcessorTest, TestReportIncludesSubConfig);
+ FRIEND_TEST(StatsLogProcessorTest, TestPullUidProviderSetOnConfigUpdate);
+ FRIEND_TEST(StatsLogProcessorTestRestricted, TestInconsistentRestrictedMetricsConfigUpdate);
+ FRIEND_TEST(StatsLogProcessorTestRestricted, TestRestrictedLogEventPassed);
+ FRIEND_TEST(StatsLogProcessorTestRestricted, TestRestrictedLogEventNotPassed);
+ FRIEND_TEST(StatsLogProcessorTestRestricted, RestrictedMetricsManagerOnDumpReportNotCalled);
+ FRIEND_TEST(StatsLogProcessorTestRestricted, NonRestrictedMetricsManagerOnDumpReportCalled);
+ FRIEND_TEST(StatsLogProcessorTestRestricted, RestrictedMetricOnDumpReportEmpty);
+ FRIEND_TEST(StatsLogProcessorTestRestricted, NonRestrictedMetricOnDumpReportNotEmpty);
+ FRIEND_TEST(StatsLogProcessorTestRestricted, RestrictedMetricNotWriteToDisk);
+ FRIEND_TEST(StatsLogProcessorTestRestricted, NonRestrictedMetricWriteToDisk);
+ FRIEND_TEST(StatsLogProcessorTestRestricted, RestrictedMetricFlushIfReachMemoryLimit);
+ FRIEND_TEST(StatsLogProcessorTestRestricted, RestrictedMetricNotFlushIfNotReachMemoryLimit);
FRIEND_TEST(WakelockDurationE2eTest, TestAggregatedPredicateDimensionsForSumDuration1);
FRIEND_TEST(WakelockDurationE2eTest, TestAggregatedPredicateDimensionsForSumDuration2);
FRIEND_TEST(WakelockDurationE2eTest, TestAggregatedPredicateDimensionsForSumDuration3);
@@ -321,6 +403,8 @@
FRIEND_TEST(MetricConditionLinkE2eTest, TestMultiplePredicatesAndLinks2);
FRIEND_TEST(AttributionE2eTest, TestAttributionMatchAndSliceByFirstUid);
FRIEND_TEST(AttributionE2eTest, TestAttributionMatchAndSliceByChain);
+ FRIEND_TEST(GaugeMetricE2ePulledTest, TestFirstNSamplesPulledNoTrigger);
+ FRIEND_TEST(GaugeMetricE2ePulledTest, TestFirstNSamplesPulledNoTriggerWithActivation);
FRIEND_TEST(GaugeMetricE2ePushedTest, TestMultipleFieldsForPushedEvent);
FRIEND_TEST(GaugeMetricE2ePushedTest, TestRepeatedFieldsForPushedEvent);
FRIEND_TEST(GaugeMetricE2ePulledTest, TestRandomSamplePulledEvents);
@@ -328,16 +412,30 @@
FRIEND_TEST(GaugeMetricE2ePulledTest, TestRandomSamplePulledEventsWithActivation);
FRIEND_TEST(GaugeMetricE2ePulledTest, TestRandomSamplePulledEventsNoCondition);
FRIEND_TEST(GaugeMetricE2ePulledTest, TestConditionChangeToTrueSamplePulledEvents);
+ FRIEND_TEST(RestrictedEventMetricE2eTest, TestEnforceTtlRemovesOldEvents);
+ FRIEND_TEST(RestrictedEventMetricE2eTest, TestFlagDisabled);
+ FRIEND_TEST(RestrictedEventMetricE2eTest, TestLogEventsEnforceTtls);
+ FRIEND_TEST(RestrictedEventMetricE2eTest, TestQueryEnforceTtls);
+ FRIEND_TEST(RestrictedEventMetricE2eTest, TestLogEventsDoesNotEnforceTtls);
+ FRIEND_TEST(RestrictedEventMetricE2eTest, TestNotFlushed);
+ FRIEND_TEST(RestrictedEventMetricE2eTest, TestFlushInWriteDataToDisk);
+ FRIEND_TEST(RestrictedEventMetricE2eTest, TestFlushPeriodically);
+ FRIEND_TEST(RestrictedEventMetricE2eTest, TestTTlsEnforceDbGuardrails);
+ FRIEND_TEST(RestrictedEventMetricE2eTest, TestOnLogEventMalformedDbNameDeleted);
+ FRIEND_TEST(RestrictedEventMetricE2eTest, TestEnforceDbGuardrails);
+ FRIEND_TEST(RestrictedEventMetricE2eTest, TestEnforceDbGuardrailsDoesNotDeleteBeforeGuardrail);
+ FRIEND_TEST(RestrictedEventMetricE2eTest, TestRestrictedMetricLoadsTtlFromDisk);
- FRIEND_TEST(AnomalyDetectionE2eTest, TestSlicedCountMetric_single_bucket);
- FRIEND_TEST(AnomalyDetectionE2eTest, TestSlicedCountMetric_multiple_buckets);
- FRIEND_TEST(AnomalyDetectionE2eTest, TestCountMetric_save_refractory_to_disk_no_data_written);
- FRIEND_TEST(AnomalyDetectionE2eTest, TestCountMetric_save_refractory_to_disk);
- FRIEND_TEST(AnomalyDetectionE2eTest, TestCountMetric_load_refractory_from_disk);
- FRIEND_TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_single_bucket);
- FRIEND_TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_partial_bucket);
- FRIEND_TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_multiple_buckets);
- FRIEND_TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_long_refractory_period);
+ FRIEND_TEST(AnomalyCountDetectionE2eTest, TestSlicedCountMetric_single_bucket);
+ FRIEND_TEST(AnomalyCountDetectionE2eTest, TestSlicedCountMetric_multiple_buckets);
+ FRIEND_TEST(AnomalyCountDetectionE2eTest,
+ TestCountMetric_save_refractory_to_disk_no_data_written);
+ FRIEND_TEST(AnomalyCountDetectionE2eTest, TestCountMetric_save_refractory_to_disk);
+ FRIEND_TEST(AnomalyCountDetectionE2eTest, TestCountMetric_load_refractory_from_disk);
+ FRIEND_TEST(AnomalyDurationDetectionE2eTest, TestDurationMetric_SUM_single_bucket);
+ FRIEND_TEST(AnomalyDurationDetectionE2eTest, TestDurationMetric_SUM_partial_bucket);
+ FRIEND_TEST(AnomalyDurationDetectionE2eTest, TestDurationMetric_SUM_multiple_buckets);
+ FRIEND_TEST(AnomalyDurationDetectionE2eTest, TestDurationMetric_SUM_long_refractory_period);
FRIEND_TEST(AlarmE2eTest, TestMultipleAlarms);
FRIEND_TEST(ConfigTtlE2eTest, TestCountMetric);
@@ -386,6 +484,8 @@
FRIEND_TEST(ValueMetricE2eTest, TestInitWithValueFieldPositionALL);
FRIEND_TEST(KllMetricE2eTest, TestInitWithKllFieldPositionALL);
+
+ FRIEND_TEST(StatsServiceStatsdInitTest, StatsServiceStatsdInitTest);
};
} // namespace statsd
diff --git a/statsd/src/StatsService.cpp b/statsd/src/StatsService.cpp
index ef21a04..f1d48a1 100644
--- a/statsd/src/StatsService.cpp
+++ b/statsd/src/StatsService.cpp
@@ -18,20 +18,15 @@
#include "Log.h"
#include "StatsService.h"
-#include "stats_log_util.h"
-#include "android-base/stringprintf.h"
-#include "config/ConfigKey.h"
-#include "config/ConfigManager.h"
-#include "guardrail/StatsdStats.h"
-#include "storage/StorageManager.h"
-#include "subscriber/SubscriberReporter.h"
#include <android-base/file.h>
#include <android-base/strings.h>
+#include <android-modules-utils/sdk_level.h>
+#include <android/binder_ibinder_platform.h>
#include <cutils/multiuser.h>
+#include <private/android_filesystem_config.h>
#include <src/statsd_config.pb.h>
#include <src/uid_data.pb.h>
-#include <private/android_filesystem_config.h>
#include <statslog_statsd.h>
#include <stdio.h>
#include <stdlib.h>
@@ -39,9 +34,20 @@
#include <unistd.h>
#include <utils/String16.h>
+#include "android-base/stringprintf.h"
+#include "config/ConfigKey.h"
+#include "config/ConfigManager.h"
+#include "flags/FlagProvider.h"
+#include "guardrail/StatsdStats.h"
+#include "stats_log_util.h"
+#include "storage/StorageManager.h"
+#include "subscriber/SubscriberReporter.h"
+#include "utils/DbUtils.h"
+
using namespace android;
using android::base::StringPrintf;
+using android::modules::sdklevel::IsAtLeastU;
using android::util::FIELD_COUNT_REPEATED;
using android::util::FIELD_TYPE_MESSAGE;
@@ -53,9 +59,9 @@
constexpr const char* kPermissionDump = "android.permission.DUMP";
-constexpr const char* kPermissionRegisterPullAtom = "android.permission.REGISTER_STATS_PULL_ATOM";
+constexpr const char* kTracedProbesSid = "u:r:traced_probes:s0";
-constexpr const char* kIncludeCertificateHash = "include_certificate_hash";
+constexpr const char* kPermissionRegisterPullAtom = "android.permission.REGISTER_STATS_PULL_ATOM";
#define STATS_SERVICE_DIR "/data/misc/stats-service"
@@ -90,8 +96,39 @@
} \
}
-StatsService::StatsService(const sp<Looper>& handlerLooper, shared_ptr<LogEventQueue> queue)
- : mAnomalyAlarmMonitor(new AlarmMonitor(
+Status checkSid(const char* expectedSid) {
+ const char* sid = nullptr;
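+    // The calling SID can only be queried on platforms where the NDK API is available.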
+ if (__builtin_available(android __ANDROID_API_U__, *)) {
+ sid = AIBinder_getCallingSid();
+ }
+
+    // Root (which is, for example, the calling uid in tests) has all permissions.
+ uid_t uid = AIBinder_getCallingUid();
+ if (uid == AID_ROOT) {
+ return Status::ok();
+ }
+
+ if (sid != nullptr && strcmp(expectedSid, sid) == 0) {
+ return Status::ok();
+ } else {
+ return exception(EX_SECURITY,
+ StringPrintf("SID '%s' is not expected SID '%s'", sid, expectedSid));
+ }
+}
+
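+// Returns early from the enclosing binder method when the caller's SID does not match.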
+#define ENFORCE_SID(sid) \
+ { \
+ Status status = checkSid((sid)); \
+ if (!status.isOk()) { \
+ return status; \
+ } \
+ }
+
+StatsService::StatsService(const sp<UidMap>& uidMap, shared_ptr<LogEventQueue> queue,
+ const std::shared_ptr<LogEventFilter>& logEventFilter,
+ int initEventDelaySecs)
+ : mUidMap(uidMap),
+ mAnomalyAlarmMonitor(new AlarmMonitor(
MIN_DIFF_TO_UPDATE_REGISTERED_ALARM_SECS,
[this](const shared_ptr<IStatsCompanionService>& /*sc*/, int64_t timeMillis) {
mProcessor->setAnomalyAlarm(timeMillis);
@@ -115,12 +152,13 @@
StatsdStats::getInstance().noteRegisteredPeriodicAlarmChanged();
}
})),
- mEventQueue(queue),
+ mEventQueue(std::move(queue)),
+ mLogEventFilter(logEventFilter),
mBootCompleteTrigger({kBootCompleteTag, kUidMapReceivedTag, kAllPullersRegisteredTag},
- [this]() { mProcessor->onStatsdInitCompleted(getElapsedRealtimeNs()); }),
+ [this]() { onStatsdInitCompleted(); }),
mStatsCompanionServiceDeathRecipient(
- AIBinder_DeathRecipient_new(StatsService::statsCompanionServiceDied)) {
- mUidMap = UidMap::getInstance();
+ AIBinder_DeathRecipient_new(StatsService::statsCompanionServiceDied)),
+ mInitEventDelaySecs(initEventDelaySecs) {
mPullerManager = new StatsPullerManager();
StatsPuller::SetUidMap(mUidMap);
mConfigManager = new ConfigManager();
@@ -162,7 +200,29 @@
}
VLOG("StatsService::active configs broadcast failed for uid %d", uid);
return false;
- });
+ },
+ [this](const ConfigKey& key, const string& delegatePackage,
+ const vector<int64_t>& restrictedMetrics) {
+ set<string> configPackages;
+ set<int32_t> delegateUids;
+ for (const auto& kv : UidMap::sAidToUidMapping) {
+ if (kv.second == static_cast<uint32_t>(key.GetUid())) {
+ configPackages.insert(kv.first);
+ }
+ if (kv.first == delegatePackage) {
+ delegateUids.insert(kv.second);
+ }
+ }
+ if (configPackages.empty()) {
+ configPackages = mUidMap->getAppNamesFromUid(key.GetUid(), true);
+ }
+ if (delegateUids.empty()) {
+ delegateUids = mUidMap->getAppUid(delegatePackage);
+ }
+ mConfigManager->SendRestrictedMetricsBroadcast(configPackages, key.GetId(),
+ delegateUids, restrictedMetrics);
+ },
+ logEventFilter);
mUidMap->setListener(mProcessor);
mConfigManager->AddListener(mProcessor);
@@ -357,12 +417,7 @@
}
if (!utf8Args[0].compare(String8("data-subscribe"))) {
- {
- std::lock_guard<std::mutex> lock(mShellSubscriberMutex);
- if (mShellSubscriber == nullptr) {
- mShellSubscriber = new ShellSubscriber(mUidMap, mPullerManager);
- }
- }
+ initShellSubscriber();
int timeoutSec = -1;
if (argc >= 2) {
timeoutSec = atoi(utf8Args[1].c_str());
@@ -792,8 +847,7 @@
}
if (good) {
dprintf(out, "Logging AppBreadcrumbReported(%d, %d, %d) to statslog.\n", uid, label, state);
- android::os::statsd::util::stats_write(
- android::os::statsd::util::APP_BREADCRUMB_REPORTED, uid, label, state);
+ util::stats_write(util::APP_BREADCRUMB_REPORTED, uid, label, state);
} else {
print_cmd_help(out);
return UNKNOWN_ERROR;
@@ -1079,10 +1133,28 @@
return Status::ok();
}
+void StatsService::onStatsdInitCompleted() {
+ if (mInitEventDelaySecs > 0) {
+        // The hard-coded delay was determined by evaluating perfetto traces of statsd
+        // during boot.
+        // The delay is required to properly process the event storm that often takes place
+        // right after device boot.
+ // This function is called from a dedicated thread without holding locks, so sleeping is ok.
+ // See MultiConditionTrigger::markComplete() executorThread for details
+ // For more details see http://b/277958338
+ std::this_thread::sleep_for(std::chrono::seconds(mInitEventDelaySecs));
+ }
+
+ mProcessor->onStatsdInitCompleted(getElapsedRealtimeNs());
+}
+
void StatsService::Startup() {
mConfigManager->Startup();
+ int64_t wallClockNs = getWallClockNs();
+ int64_t elapsedRealtimeNs = getElapsedRealtimeNs();
mProcessor->LoadActiveConfigsFromDisk();
- mProcessor->LoadMetadataFromDisk(getWallClockNs(), getElapsedRealtimeNs());
+ mProcessor->LoadMetadataFromDisk(wallClockNs, elapsedRealtimeNs);
+ mProcessor->EnforceDataTtls(wallClockNs, elapsedRealtimeNs);
}
void StatsService::Terminate() {
@@ -1307,11 +1379,8 @@
Status StatsService::updateProperties(const vector<PropertyParcel>& properties) {
ENFORCE_UID(AID_SYSTEM);
- for (const auto& [property, value] : properties) {
- if (property == kIncludeCertificateHash) {
- mUidMap->setIncludeCertificateHash(value == "true");
- }
- }
+ // TODO(b/281765292): Forward statsd_java properties received here to FlagProvider.
+
return Status::ok();
}
@@ -1349,6 +1418,90 @@
mPullerManager->SetStatsCompanionService(nullptr);
}
+Status StatsService::setRestrictedMetricsChangedOperation(const int64_t configId,
+ const string& configPackage,
+ const shared_ptr<IPendingIntentRef>& pir,
+ const int32_t callingUid,
+ vector<int64_t>* output) {
+ ENFORCE_UID(AID_SYSTEM);
+ if (!IsAtLeastU()) {
+ ALOGW("setRestrictedMetricsChangedOperation invoked on U- device");
+ return Status::ok();
+ }
+ mConfigManager->SetRestrictedMetricsChangedReceiver(configPackage, configId, callingUid, pir);
+ if (output != nullptr) {
+ mProcessor->fillRestrictedMetrics(configId, configPackage, callingUid, output);
+ } else {
+ ALOGW("StatsService::setRestrictedMetricsChangedOperation output was nullptr");
+ }
+ return Status::ok();
+}
+
+Status StatsService::removeRestrictedMetricsChangedOperation(const int64_t configId,
+ const string& configPackage,
+ const int32_t callingUid) {
+ ENFORCE_UID(AID_SYSTEM);
+ if (!IsAtLeastU()) {
+ ALOGW("removeRestrictedMetricsChangedOperation invoked on U- device");
+ return Status::ok();
+ }
+ mConfigManager->RemoveRestrictedMetricsChangedReceiver(configPackage, configId, callingUid);
+ return Status::ok();
+}
+
+Status StatsService::querySql(const string& sqlQuery, const int32_t minSqlClientVersion,
+ const optional<vector<uint8_t>>& policyConfig,
+ const shared_ptr<IStatsQueryCallback>& callback,
+ const int64_t configKey, const string& configPackage,
+ const int32_t callingUid) {
+ ENFORCE_UID(AID_SYSTEM);
+ if (callback == nullptr) {
+ ALOGW("querySql called with null callback.");
+ StatsdStats::getInstance().noteQueryRestrictedMetricFailed(
+ configKey, configPackage, std::nullopt, callingUid,
+ InvalidQueryReason(NULL_CALLBACK));
+ return Status::ok();
+ }
+ mProcessor->querySql(sqlQuery, minSqlClientVersion, policyConfig, callback, configKey,
+ configPackage, callingUid);
+ return Status::ok();
+}
+
+Status StatsService::addSubscription(const vector<uint8_t>& subscriptionConfig,
+ const shared_ptr<IStatsSubscriptionCallback>& callback) {
+ ENFORCE_SID(kTracedProbesSid);
+
+ initShellSubscriber();
+
+ mShellSubscriber->startNewSubscription(subscriptionConfig, callback);
+ return Status::ok();
+}
+
+Status StatsService::removeSubscription(const shared_ptr<IStatsSubscriptionCallback>& callback) {
+ ENFORCE_SID(kTracedProbesSid);
+
+ if (mShellSubscriber != nullptr) {
+ mShellSubscriber->unsubscribe(callback);
+ }
+ return Status::ok();
+}
+
+Status StatsService::flushSubscription(const shared_ptr<IStatsSubscriptionCallback>& callback) {
+ ENFORCE_SID(kTracedProbesSid);
+
+ if (mShellSubscriber != nullptr) {
+ mShellSubscriber->flushSubscription(callback);
+ }
+ return Status::ok();
+}
+
+void StatsService::initShellSubscriber() {
+ std::lock_guard<std::mutex> lock(mShellSubscriberMutex);
+ if (mShellSubscriber == nullptr) {
+ mShellSubscriber = new ShellSubscriber(mUidMap, mPullerManager, mLogEventFilter);
+ }
+}
+
} // namespace statsd
} // namespace os
} // namespace android
diff --git a/statsd/src/StatsService.h b/statsd/src/StatsService.h
index 790397f..4d05cc1 100644
--- a/statsd/src/StatsService.h
+++ b/statsd/src/StatsService.h
@@ -20,6 +20,7 @@
#include <aidl/android/os/BnStatsd.h>
#include <aidl/android/os/IPendingIntentRef.h>
#include <aidl/android/os/IPullAtomCallback.h>
+#include <aidl/android/os/IStatsSubscriptionCallback.h>
#include <aidl/android/util/PropertyParcel.h>
#include <gtest/gtest_prod.h>
#include <utils/Looper.h>
@@ -40,14 +41,16 @@
using namespace android::os;
using namespace std;
+using ::ndk::SpAIBinder;
using Status = ::ndk::ScopedAStatus;
using aidl::android::os::BnStatsd;
using aidl::android::os::IPendingIntentRef;
using aidl::android::os::IPullAtomCallback;
+using aidl::android::os::IStatsQueryCallback;
+using aidl::android::os::IStatsSubscriptionCallback;
using aidl::android::util::PropertyParcel;
using ::ndk::ScopedAIBinder_DeathRecipient;
using ::ndk::ScopedFileDescriptor;
-using std::shared_ptr;
namespace android {
namespace os {
@@ -55,7 +58,9 @@
class StatsService : public BnStatsd {
public:
- StatsService(const sp<Looper>& handlerLooper, std::shared_ptr<LogEventQueue> queue);
+ StatsService(const sp<UidMap>& uidMap, shared_ptr<LogEventQueue> queue,
+ const std::shared_ptr<LogEventFilter>& logEventFilter,
+ int initEventDelaySecs = kStatsdInitDelaySecs);
virtual ~StatsService();
/** The anomaly alarm registered with AlarmManager won't be updated by less than this. */
@@ -174,7 +179,7 @@
*/
virtual Status registerPullAtomCallback(
int32_t uid, int32_t atomTag, int64_t coolDownMillis, int64_t timeoutMillis,
- const std::vector<int32_t>& additiveFields,
+ const vector<int32_t>& additiveFields,
const shared_ptr<IPullAtomCallback>& pullerCallback) override;
/**
@@ -182,7 +187,7 @@
*/
virtual Status registerNativePullAtomCallback(
int32_t atomTag, int64_t coolDownMillis, int64_t timeoutMillis,
- const std::vector<int32_t>& additiveFields,
+ const vector<int32_t>& additiveFields,
const shared_ptr<IPullAtomCallback>& pullerCallback) override;
/**
@@ -198,12 +203,59 @@
/**
* Binder call to get registered experiment IDs.
*/
- virtual Status getRegisteredExperimentIds(std::vector<int64_t>* expIdsOut);
+ virtual Status getRegisteredExperimentIds(vector<int64_t>* expIdsOut);
/**
* Binder call to update properties in statsd_java namespace.
*/
- virtual Status updateProperties(const std::vector<PropertyParcel>& properties);
+ virtual Status updateProperties(const vector<PropertyParcel>& properties);
+
+ /**
+ * Binder call to let clients register the restricted metrics changed operation for the given
+ * config and calling uid.
+ */
+ virtual Status setRestrictedMetricsChangedOperation(const int64_t configKey,
+ const string& configPackage,
+ const shared_ptr<IPendingIntentRef>& pir,
+ const int32_t callingUid,
+ vector<int64_t>* output);
+
+ /**
+ * Binder call to remove the restricted metrics changed operation for the specified config
+ * and calling uid.
+ */
+ virtual Status removeRestrictedMetricsChangedOperation(const int64_t configKey,
+ const string& configPackage,
+ const int32_t callingUid);
+
+ /**
+ * Binder call to query data in statsd sql store.
+ */
+ virtual Status querySql(const string& sqlQuery, const int32_t minSqlClientVersion,
+ const optional<vector<uint8_t>>& policyConfig,
+ const shared_ptr<IStatsQueryCallback>& callback,
+ const int64_t configKey, const string& configPackage,
+ const int32_t callingUid);
+
+ /**
+ * Binder call to add a subscription.
+ */
+ virtual Status addSubscription(const vector<uint8_t>& subscriptionConfig,
+ const shared_ptr<IStatsSubscriptionCallback>& callback) override;
+
+ /**
+ * Binder call to remove a subscription.
+ */
+ virtual Status removeSubscription(
+ const shared_ptr<IStatsSubscriptionCallback>& callback) override;
+
+ /**
+ * Binder call to flush atom events for a subscription.
+ */
+ virtual Status flushSubscription(
+ const shared_ptr<IStatsSubscriptionCallback>& callback) override;
+
+ const static int kStatsdInitDelaySecs = 90;
private:
/**
@@ -347,9 +399,19 @@
void statsCompanionServiceDiedImpl();
/**
+ * Initialize ShellSubscriber
+ */
+ void initShellSubscriber();
+
+    /**
+     * Notify StatsLogProcessor that boot has completed
+ */
+ void onStatsdInitCompleted();
+
+ /**
* Tracks the uid <--> package name mapping.
*/
- sp<UidMap> mUidMap;
+ const sp<UidMap> mUidMap;
/**
* Fetches external metrics
@@ -387,7 +449,8 @@
* Mutex for setting the shell subscriber
*/
mutable mutex mShellSubscriberMutex;
- std::shared_ptr<LogEventQueue> mEventQueue;
+ shared_ptr<LogEventQueue> mEventQueue;
+ std::shared_ptr<LogEventFilter> mLogEventFilter;
MultiConditionTrigger mBootCompleteTrigger;
static const inline string kBootCompleteTag = "BOOT_COMPLETE";
@@ -396,6 +459,12 @@
ScopedAIBinder_DeathRecipient mStatsCompanionServiceDeathRecipient;
+ const int mInitEventDelaySecs;
+
+ friend class StatsServiceConfigTest;
+ friend class StatsServiceStatsdInitTest;
+ friend class RestrictedConfigE2ETest;
+
FRIEND_TEST(StatsLogProcessorTest, TestActivationsPersistAcrossSystemServerRestart);
FRIEND_TEST(StatsServiceTest, TestAddConfig_simple);
FRIEND_TEST(StatsServiceTest, TestAddConfig_empty);
@@ -414,13 +483,18 @@
FRIEND_TEST(PartialBucketE2eTest, TestGaugeMetricWithoutMinPartialBucket);
FRIEND_TEST(PartialBucketE2eTest, TestGaugeMetricWithMinPartialBucket);
FRIEND_TEST(PartialBucketE2eTest, TestCountMetricNoSplitByDefault);
-
+ FRIEND_TEST(RestrictedConfigE2ETest, NonRestrictedConfigGetReport);
+ FRIEND_TEST(RestrictedConfigE2ETest, RestrictedConfigNoReport);
+ FRIEND_TEST(RestrictedConfigE2ETest,
+ TestSendRestrictedMetricsChangedBroadcastMultipleMatchedConfigs);
FRIEND_TEST(ConfigUpdateE2eTest, TestAnomalyDurationMetric);
- FRIEND_TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_single_bucket);
- FRIEND_TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_partial_bucket);
- FRIEND_TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_multiple_buckets);
- FRIEND_TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_long_refractory_period);
+ FRIEND_TEST(AnomalyDurationDetectionE2eTest, TestDurationMetric_SUM_single_bucket);
+ FRIEND_TEST(AnomalyDurationDetectionE2eTest, TestDurationMetric_SUM_partial_bucket);
+ FRIEND_TEST(AnomalyDurationDetectionE2eTest, TestDurationMetric_SUM_multiple_buckets);
+ FRIEND_TEST(AnomalyDurationDetectionE2eTest, TestDurationMetric_SUM_long_refractory_period);
+
+ FRIEND_TEST(StatsServiceStatsdInitTest, StatsServiceStatsdInitTest);
};
} // namespace statsd
diff --git a/statsd/src/annotations.h b/statsd/src/annotations.h
deleted file mode 100644
index cf7f543..0000000
--- a/statsd/src/annotations.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-namespace android {
-namespace os {
-namespace statsd {
-
-const uint8_t ANNOTATION_ID_IS_UID = 1;
-const uint8_t ANNOTATION_ID_TRUNCATE_TIMESTAMP = 2;
-const uint8_t ANNOTATION_ID_PRIMARY_FIELD = 3;
-const uint8_t ANNOTATION_ID_EXCLUSIVE_STATE = 4;
-const uint8_t ANNOTATION_ID_PRIMARY_FIELD_FIRST_UID = 5;
-const uint8_t ANNOTATION_ID_TRIGGER_STATE_RESET = 7;
-const uint8_t ANNOTATION_ID_STATE_NESTED = 8;
-
-} // namespace statsd
-} // namespace os
-} // namespace android
diff --git a/statsd/src/anomaly/AnomalyTracker.cpp b/statsd/src/anomaly/AnomalyTracker.cpp
index 68dc0aa..fe390ca 100644
--- a/statsd/src/anomaly/AnomalyTracker.cpp
+++ b/statsd/src/anomaly/AnomalyTracker.cpp
@@ -255,13 +255,15 @@
return false;
}
-std::pair<bool, uint64_t> AnomalyTracker::getProtoHash() const {
+std::pair<optional<InvalidConfigReason>, uint64_t> AnomalyTracker::getProtoHash() const {
string serializedAlert;
if (!mAlert.SerializeToString(&serializedAlert)) {
ALOGW("Unable to serialize alert %lld", (long long)mAlert.id());
- return {false, 0};
+ return {createInvalidConfigReasonWithAlert(INVALID_CONFIG_REASON_ALERT_SERIALIZATION_FAILED,
+ mAlert.metric_id(), mAlert.id()),
+ 0};
}
- return {true, Hash64(serializedAlert)};
+ return {nullopt, Hash64(serializedAlert)};
}
void AnomalyTracker::informSubscribers(const MetricDimensionKey& key, int64_t metric_id,
diff --git a/statsd/src/anomaly/AnomalyTracker.h b/statsd/src/anomaly/AnomalyTracker.h
index feb6bd9..281c1f6 100644
--- a/statsd/src/anomaly/AnomalyTracker.h
+++ b/statsd/src/anomaly/AnomalyTracker.h
@@ -22,15 +22,17 @@
#include "AlarmMonitor.h"
#include "config/ConfigKey.h"
+#include "guardrail/StatsdStats.h"
+#include "hash.h"
#include "src/statsd_config.pb.h" // Alert
#include "src/statsd_metadata.pb.h" // AlertMetadata
-#include "hash.h"
-#include "stats_util.h" // HashableDimensionKey and DimToValMap
+#include "stats_util.h" // HashableDimensionKey and DimToValMap
namespace android {
namespace os {
namespace statsd {
+using std::optional;
using std::shared_ptr;
using std::unordered_map;
@@ -109,7 +111,7 @@
return mNumOfPastBuckets;
}
- std::pair<bool, uint64_t> getProtoHash() const;
+ std::pair<optional<InvalidConfigReason>, uint64_t> getProtoHash() const;
// Sets an alarm for the given timestamp.
// Replaces previous alarm if one already exists.
@@ -217,10 +219,10 @@
FRIEND_TEST(AnomalyTrackerTest, TestSparseBuckets);
FRIEND_TEST(GaugeMetricProducerTest, TestAnomalyDetection);
FRIEND_TEST(CountMetricProducerTest, TestAnomalyDetectionUnSliced);
- FRIEND_TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_single_bucket);
- FRIEND_TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_partial_bucket);
- FRIEND_TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_multiple_buckets);
- FRIEND_TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_long_refractory_period);
+ FRIEND_TEST(AnomalyDurationDetectionE2eTest, TestDurationMetric_SUM_single_bucket);
+ FRIEND_TEST(AnomalyDurationDetectionE2eTest, TestDurationMetric_SUM_partial_bucket);
+ FRIEND_TEST(AnomalyDurationDetectionE2eTest, TestDurationMetric_SUM_multiple_buckets);
+ FRIEND_TEST(AnomalyDurationDetectionE2eTest, TestDurationMetric_SUM_long_refractory_period);
FRIEND_TEST(ConfigUpdateTest, TestUpdateAlerts);
};
diff --git a/statsd/src/condition/CombinationConditionTracker.cpp b/statsd/src/condition/CombinationConditionTracker.cpp
index 7a97ab3..829ae06 100644
--- a/statsd/src/condition/CombinationConditionTracker.cpp
+++ b/statsd/src/condition/CombinationConditionTracker.cpp
@@ -35,11 +35,11 @@
VLOG("~CombinationConditionTracker() %lld", (long long)mConditionId);
}
-bool CombinationConditionTracker::init(const vector<Predicate>& allConditionConfig,
- const vector<sp<ConditionTracker>>& allConditionTrackers,
- const unordered_map<int64_t, int>& conditionIdIndexMap,
- vector<bool>& stack,
- vector<ConditionState>& conditionCache) {
+optional<InvalidConfigReason> CombinationConditionTracker::init(
+ const vector<Predicate>& allConditionConfig,
+ const vector<sp<ConditionTracker>>& allConditionTrackers,
+ const unordered_map<int64_t, int>& conditionIdIndexMap, vector<bool>& stack,
+ vector<ConditionState>& conditionCache) {
VLOG("Combination predicate init() %lld", (long long)mConditionId);
if (mInitialized) {
// All the children are guaranteed to be initialized, but the recursion is needed to
@@ -56,21 +56,24 @@
}
conditionCache[mIndex] =
evaluateCombinationCondition(mChildren, mLogicalOperation, conditionCache);
- return true;
+ return nullopt;
}
// mark this node as visited in the recursion stack.
stack[mIndex] = true;
Predicate_Combination combinationCondition = allConditionConfig[mIndex].combination();
+ optional<InvalidConfigReason> invalidConfigReason;
if (!combinationCondition.has_operation()) {
- return false;
+ return createInvalidConfigReasonWithPredicate(INVALID_CONFIG_REASON_CONDITION_NO_OPERATION,
+ mConditionId);
}
mLogicalOperation = combinationCondition.operation();
if (mLogicalOperation == LogicalOperation::NOT && combinationCondition.predicate_size() != 1) {
- return false;
+ return createInvalidConfigReasonWithPredicate(
+ INVALID_CONFIG_REASON_CONDITION_NOT_OPERATION_IS_NOT_UNARY, mConditionId);
}
for (auto child : combinationCondition.predicate()) {
@@ -78,7 +81,10 @@
if (it == conditionIdIndexMap.end()) {
ALOGW("Predicate %lld not found in the config", (long long)child);
- return false;
+ invalidConfigReason = createInvalidConfigReasonWithPredicate(
+ INVALID_CONFIG_REASON_CONDITION_CHILD_NOT_FOUND, mConditionId);
+ invalidConfigReason->conditionIds.push_back(child);
+ return invalidConfigReason;
}
int childIndex = it->second;
@@ -86,15 +92,19 @@
// if the child is a visited node in the recursion -> circle detected.
if (stack[childIndex]) {
ALOGW("Circle detected!!!");
- return false;
+ invalidConfigReason = createInvalidConfigReasonWithPredicate(
+ INVALID_CONFIG_REASON_CONDITION_CYCLE, mConditionId);
+ invalidConfigReason->conditionIds.push_back(child);
+ return invalidConfigReason;
}
- bool initChildSucceeded = childTracker->init(allConditionConfig, allConditionTrackers,
- conditionIdIndexMap, stack, conditionCache);
+ invalidConfigReason = childTracker->init(allConditionConfig, allConditionTrackers,
+ conditionIdIndexMap, stack, conditionCache);
- if (!initChildSucceeded) {
+ if (invalidConfigReason.has_value()) {
ALOGW("Child initialization failed %lld ", (long long)child);
- return false;
+ invalidConfigReason->conditionIds.push_back(mConditionId);
+ return invalidConfigReason;
} else {
VLOG("Child initialization success %lld ", (long long)child);
}
@@ -120,10 +130,10 @@
mInitialized = true;
- return true;
+ return nullopt;
}
-bool CombinationConditionTracker::onConfigUpdated(
+optional<InvalidConfigReason> CombinationConditionTracker::onConfigUpdated(
const vector<Predicate>& allConditionProtos, const int index,
const vector<sp<ConditionTracker>>& allConditionTrackers,
const unordered_map<int64_t, int>& atomMatchingTrackerMap,
@@ -135,23 +145,30 @@
mUnSlicedChildren.clear();
mSlicedChildren.clear();
Predicate_Combination combinationCondition = allConditionProtos[mIndex].combination();
+ optional<InvalidConfigReason> invalidConfigReason;
for (const int64_t child : combinationCondition.predicate()) {
const auto& it = conditionTrackerMap.find(child);
if (it == conditionTrackerMap.end()) {
ALOGW("Predicate %lld not found in the config", (long long)child);
- return false;
+ invalidConfigReason = createInvalidConfigReasonWithPredicate(
+ INVALID_CONFIG_REASON_CONDITION_CHILD_NOT_FOUND, mConditionId);
+ invalidConfigReason->conditionIds.push_back(child);
+ return invalidConfigReason;
}
int childIndex = it->second;
const sp<ConditionTracker>& childTracker = allConditionTrackers[childIndex];
// Ensures that the child's tracker indices are updated.
- if (!childTracker->onConfigUpdated(allConditionProtos, childIndex, allConditionTrackers,
- atomMatchingTrackerMap, conditionTrackerMap)) {
+ invalidConfigReason =
+ childTracker->onConfigUpdated(allConditionProtos, childIndex, allConditionTrackers,
+ atomMatchingTrackerMap, conditionTrackerMap);
+ if (invalidConfigReason.has_value()) {
ALOGW("Child update failed %lld ", (long long)child);
- return false;
+ invalidConfigReason->conditionIds.push_back(child);
+ return invalidConfigReason;
}
if (allConditionTrackers[childIndex]->isSliced()) {
@@ -163,7 +180,7 @@
mTrackerIndex.insert(childTracker->getAtomMatchingTrackerIndex().begin(),
childTracker->getAtomMatchingTrackerIndex().end());
}
- return true;
+ return nullopt;
}
void CombinationConditionTracker::isConditionMet(
diff --git a/statsd/src/condition/CombinationConditionTracker.h b/statsd/src/condition/CombinationConditionTracker.h
index 3889e56..82463f0 100644
--- a/statsd/src/condition/CombinationConditionTracker.h
+++ b/statsd/src/condition/CombinationConditionTracker.h
@@ -30,15 +30,17 @@
~CombinationConditionTracker();
- bool init(const std::vector<Predicate>& allConditionConfig,
- const std::vector<sp<ConditionTracker>>& allConditionTrackers,
- const std::unordered_map<int64_t, int>& conditionIdIndexMap, std::vector<bool>& stack,
- std::vector<ConditionState>& conditionCache) override;
+ optional<InvalidConfigReason> init(
+ const std::vector<Predicate>& allConditionConfig,
+ const std::vector<sp<ConditionTracker>>& allConditionTrackers,
+ const std::unordered_map<int64_t, int>& conditionIdIndexMap, std::vector<bool>& stack,
+ std::vector<ConditionState>& conditionCache) override;
- bool onConfigUpdated(const std::vector<Predicate>& allConditionProtos, const int index,
- const std::vector<sp<ConditionTracker>>& allConditionTrackers,
- const std::unordered_map<int64_t, int>& atomMatchingTrackerMap,
- const std::unordered_map<int64_t, int>& conditionTrackerMap) override;
+ optional<InvalidConfigReason> onConfigUpdated(
+ const std::vector<Predicate>& allConditionProtos, const int index,
+ const std::vector<sp<ConditionTracker>>& allConditionTrackers,
+ const std::unordered_map<int64_t, int>& atomMatchingTrackerMap,
+ const std::unordered_map<int64_t, int>& conditionTrackerMap) override;
void evaluateCondition(const LogEvent& event,
const std::vector<MatchingState>& eventMatcherValues,
diff --git a/statsd/src/condition/ConditionTracker.h b/statsd/src/condition/ConditionTracker.h
index 4ce8cb1..2aa3ff0 100644
--- a/statsd/src/condition/ConditionTracker.h
+++ b/statsd/src/condition/ConditionTracker.h
@@ -54,10 +54,11 @@
// stack: a bit map to keep track which nodes have been visited on the stack in the recursion.
// conditionCache: tracks initial conditions of all ConditionTrackers. returns the
// current condition if called on a config update.
- virtual bool init(const std::vector<Predicate>& allConditionConfig,
- const std::vector<sp<ConditionTracker>>& allConditionTrackers,
- const std::unordered_map<int64_t, int>& conditionIdIndexMap,
- std::vector<bool>& stack, std::vector<ConditionState>& conditionCache) = 0;
+ virtual optional<InvalidConfigReason> init(
+ const std::vector<Predicate>& allConditionConfig,
+ const std::vector<sp<ConditionTracker>>& allConditionTrackers,
+ const std::unordered_map<int64_t, int>& conditionIdIndexMap, std::vector<bool>& stack,
+ std::vector<ConditionState>& conditionCache) = 0;
// Update appropriate state on config updates. Primarily, all indices need to be updated.
// This predicate and all of its children are guaranteed to be preserved across the update.
@@ -71,12 +72,13 @@
// atomMatchingTrackerMap: map of atom matcher id to index after the config update.
// conditionTrackerMap: map of condition tracker id to index after the config update.
// returns whether or not the update is successful.
- virtual bool onConfigUpdated(const std::vector<Predicate>& allConditionProtos, const int index,
- const std::vector<sp<ConditionTracker>>& allConditionTrackers,
- const std::unordered_map<int64_t, int>& atomMatchingTrackerMap,
- const std::unordered_map<int64_t, int>& conditionTrackerMap) {
+ virtual optional<InvalidConfigReason> onConfigUpdated(
+ const std::vector<Predicate>& allConditionProtos, const int index,
+ const std::vector<sp<ConditionTracker>>& allConditionTrackers,
+ const std::unordered_map<int64_t, int>& atomMatchingTrackerMap,
+ const std::unordered_map<int64_t, int>& conditionTrackerMap) {
mIndex = index;
- return true;
+ return nullopt;
}
// evaluate current condition given the new event.
diff --git a/statsd/src/condition/SimpleConditionTracker.cpp b/statsd/src/condition/SimpleConditionTracker.cpp
index 66ce961..5d7d385 100644
--- a/statsd/src/condition/SimpleConditionTracker.cpp
+++ b/statsd/src/condition/SimpleConditionTracker.cpp
@@ -56,10 +56,11 @@
VLOG("~SimpleConditionTracker()");
}
-bool SimpleConditionTracker::init(const vector<Predicate>& allConditionConfig,
- const vector<sp<ConditionTracker>>& allConditionTrackers,
- const unordered_map<int64_t, int>& conditionIdIndexMap,
- vector<bool>& stack, vector<ConditionState>& conditionCache) {
+optional<InvalidConfigReason> SimpleConditionTracker::init(
+ const vector<Predicate>& allConditionConfig,
+ const vector<sp<ConditionTracker>>& allConditionTrackers,
+ const unordered_map<int64_t, int>& conditionIdIndexMap, vector<bool>& stack,
+ vector<ConditionState>& conditionCache) {
// SimpleConditionTracker does not have dependency on other conditions, thus we just return
// if the initialization was successful.
ConditionKey conditionKey;
@@ -67,10 +68,14 @@
conditionKey[mConditionId] = DEFAULT_DIMENSION_KEY;
}
isConditionMet(conditionKey, allConditionTrackers, mSliced, conditionCache);
- return mInitialized;
+ if (!mInitialized) {
+ return createInvalidConfigReasonWithPredicate(
+ INVALID_CONFIG_REASON_CONDITION_TRACKER_NOT_INITIALIZED, mConditionId);
+ }
+ return nullopt;
}
-bool SimpleConditionTracker::onConfigUpdated(
+optional<InvalidConfigReason> SimpleConditionTracker::onConfigUpdated(
const vector<Predicate>& allConditionProtos, const int index,
const vector<sp<ConditionTracker>>& allConditionTrackers,
const unordered_map<int64_t, int>& atomMatchingTrackerMap,
@@ -78,7 +83,7 @@
ConditionTracker::onConfigUpdated(allConditionProtos, index, allConditionTrackers,
atomMatchingTrackerMap, conditionTrackerMap);
setMatcherIndices(allConditionProtos[index].simple_predicate(), atomMatchingTrackerMap);
- return true;
+ return nullopt;
}
void SimpleConditionTracker::setMatcherIndices(
diff --git a/statsd/src/condition/SimpleConditionTracker.h b/statsd/src/condition/SimpleConditionTracker.h
index 4dc5660..4687d8e 100644
--- a/statsd/src/condition/SimpleConditionTracker.h
+++ b/statsd/src/condition/SimpleConditionTracker.h
@@ -35,15 +35,17 @@
~SimpleConditionTracker();
- bool init(const std::vector<Predicate>& allConditionConfig,
- const std::vector<sp<ConditionTracker>>& allConditionTrackers,
- const std::unordered_map<int64_t, int>& conditionIdIndexMap, std::vector<bool>& stack,
- std::vector<ConditionState>& conditionCache) override;
+ optional<InvalidConfigReason> init(
+ const std::vector<Predicate>& allConditionConfig,
+ const std::vector<sp<ConditionTracker>>& allConditionTrackers,
+ const std::unordered_map<int64_t, int>& conditionIdIndexMap, std::vector<bool>& stack,
+ std::vector<ConditionState>& conditionCache) override;
- bool onConfigUpdated(const std::vector<Predicate>& allConditionProtos, const int index,
- const std::vector<sp<ConditionTracker>>& allConditionTrackers,
- const std::unordered_map<int64_t, int>& atomMatchingTrackerMap,
- const std::unordered_map<int64_t, int>& conditionTrackerMap) override;
+ optional<InvalidConfigReason> onConfigUpdated(
+ const std::vector<Predicate>& allConditionProtos, const int index,
+ const std::vector<sp<ConditionTracker>>& allConditionTrackers,
+ const std::unordered_map<int64_t, int>& atomMatchingTrackerMap,
+ const std::unordered_map<int64_t, int>& conditionTrackerMap) override;
void evaluateCondition(const LogEvent& event,
const std::vector<MatchingState>& eventMatcherValues,
diff --git a/statsd/src/config/ConfigKeyWithPackage.h b/statsd/src/config/ConfigKeyWithPackage.h
new file mode 100644
index 0000000..85e95d5
--- /dev/null
+++ b/statsd/src/config/ConfigKeyWithPackage.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <string>
+
+namespace android {
+namespace os {
+namespace statsd {
+
+using std::hash;
+using std::string;
+
+/**
+ * A config key that uses a package name instead of a uid. Generally, ConfigKey which uses a uid
+ * should be used. This is currently only used for restricted metrics changed operation.
+ */
+class ConfigKeyWithPackage {
+public:
+ ConfigKeyWithPackage(const string& package, const int64_t id) : mPackage(package), mId(id) {
+ }
+
+ inline string GetPackage() const {
+ return mPackage;
+ }
+ inline int64_t GetId() const {
+ return mId;
+ }
+
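+    // Orders by package name first, then by id, so the key can be used in std::map / std::set.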
+ inline bool operator<(const ConfigKeyWithPackage& that) const {
+ if (mPackage != that.mPackage) {
+ return mPackage < that.mPackage;
+ }
+ return mId < that.mId;
+ };
+
+ inline bool operator==(const ConfigKeyWithPackage& that) const {
+ return mPackage == that.mPackage && mId == that.mId;
+ };
+
+private:
+ string mPackage;
+ int64_t mId;
+};
+} // namespace statsd
+} // namespace os
+} // namespace android
diff --git a/statsd/src/config/ConfigManager.cpp b/statsd/src/config/ConfigManager.cpp
index ca88c62..16c4e75 100644
--- a/statsd/src/config/ConfigManager.cpp
+++ b/statsd/src/config/ConfigManager.cpp
@@ -37,6 +37,8 @@
using std::string;
using std::vector;
+using Status = ::ndk::ScopedAStatus;
+
#define STATS_SERVICE_DIR "/data/misc/stats-service"
using android::base::StringPrintf;
@@ -102,9 +104,7 @@
// Add to set.
mConfigs[key.GetUid()].insert(key);
- for (const sp<ConfigListener>& listener : mListeners) {
- broadcastList.push_back(listener);
- }
+ broadcastList = mListeners;
}
const int64_t timestampNs = getElapsedRealtimeNs();
@@ -154,6 +154,83 @@
}
}
+void ConfigManager::SetRestrictedMetricsChangedReceiver(const string& configPackage,
+ const int64_t configId,
+ const int32_t callingUid,
+ const shared_ptr<IPendingIntentRef>& pir) {
+ lock_guard<mutex> lock(mMutex);
+ ConfigKeyWithPackage configKey(configPackage, configId);
+ mRestrictedMetricsChangedReceivers[configKey][callingUid] = pir;
+}
+
+void ConfigManager::RemoveRestrictedMetricsChangedReceiver(const string& configPackage,
+ const int64_t configId,
+ const int32_t callingUid) {
+ lock_guard<mutex> lock(mMutex);
+ ConfigKeyWithPackage configKey(configPackage, configId);
+ const auto& it = mRestrictedMetricsChangedReceivers.find(configKey);
+ if (it != mRestrictedMetricsChangedReceivers.end()) {
+ it->second.erase(callingUid);
+ if (it->second.empty()) {
+ mRestrictedMetricsChangedReceivers.erase(it);
+ }
+ }
+}
+
+void ConfigManager::RemoveRestrictedMetricsChangedReceiver(
+ const ConfigKeyWithPackage& key, const int32_t delegateUid,
+ const shared_ptr<IPendingIntentRef>& pir) {
+ lock_guard<mutex> lock(mMutex);
+ const auto& it = mRestrictedMetricsChangedReceivers.find(key);
+ if (it != mRestrictedMetricsChangedReceivers.end()) {
+ const auto& pirIt = it->second.find(delegateUid);
+ if (pirIt != it->second.end() && pirIt->second == pir) {
+ it->second.erase(delegateUid);
+ if (it->second.empty()) {
+ mRestrictedMetricsChangedReceivers.erase(it);
+ }
+ }
+ }
+}
+
+void ConfigManager::SendRestrictedMetricsBroadcast(const set<string>& configPackages,
+ const int64_t configId,
+ const set<int32_t>& delegateUids,
+ const vector<int64_t>& metricIds) {
+ map<ConfigKeyWithPackage, map<int32_t, shared_ptr<IPendingIntentRef>>> intentsToSend;
+ {
+ lock_guard<mutex> lock(mMutex);
+ // Invoke the pending intent for all matching configs, as long as the listening delegates
+ // match the allowed delegate uids specified by the config.
+ for (const string& configPackage : configPackages) {
+ ConfigKeyWithPackage key(configPackage, configId);
+ const auto& it = mRestrictedMetricsChangedReceivers.find(key);
+ if (it != mRestrictedMetricsChangedReceivers.end()) {
+ for (const auto& [delegateUid, pir] : it->second) {
+ if (delegateUids.find(delegateUid) != delegateUids.end()) {
+ intentsToSend[key][delegateUid] = pir;
+ }
+ }
+ }
+ }
+ }
+
+ // Invoke the pending intents without holding the lock.
+ for (const auto& [key, innerMap] : intentsToSend) {
+ for (const auto& [delegateUid, pir] : innerMap) {
+ Status status = pir->sendRestrictedMetricsChangedBroadcast(metricIds);
+ if (status.isOk()) {
+ VLOG("ConfigManager::SendRestrictedMetricsBroadcast succeeded");
+ }
+ if (status.getExceptionCode() == EX_TRANSACTION_FAILED &&
+ status.getStatus() == STATUS_DEAD_OBJECT) {
+ // Must also be called without the lock, since remove will acquire the lock.
+ RemoveRestrictedMetricsChangedReceiver(key, delegateUid, pir);
+ }
+ }
+ }
+}
+
void ConfigManager::RemoveConfig(const ConfigKey& key) {
vector<sp<ConfigListener>> broadcastList;
{
@@ -165,9 +242,7 @@
// Remove from map
uidIt->second.erase(key);
- for (const sp<ConfigListener>& listener : mListeners) {
- broadcastList.push_back(listener);
- }
+ broadcastList = mListeners;
}
// Remove from disk. There can still be a lingering file on disk so we check
@@ -185,6 +260,7 @@
StorageManager::deleteSuffixedFiles(STATS_SERVICE_DIR, suffix.c_str());
}
+// TODO(b/xxx): consider removing all receivers associated with this uid.
void ConfigManager::RemoveConfigs(int uid) {
vector<ConfigKey> removed;
vector<sp<ConfigListener>> broadcastList;
@@ -204,9 +280,7 @@
mConfigs.erase(uidIt);
- for (const sp<ConfigListener>& listener : mListeners) {
- broadcastList.push_back(listener);
- }
+ broadcastList = mListeners;
}
// Remove separately so if they do anything in the callback they can't mess up our iteration.
@@ -233,9 +307,7 @@
uidIt = mConfigs.erase(uidIt);
}
- for (const sp<ConfigListener>& listener : mListeners) {
- broadcastList.push_back(listener);
- }
+ broadcastList = mListeners;
}
// Remove separately so if they do anything in the callback they can't mess up our iteration.
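
Note on the broadcast path above: SendRestrictedMetricsBroadcast snapshots the matching receivers while holding mMutex and only invokes the binder callbacks after the lock is released, so that RemoveRestrictedMetricsChangedReceiver (which re-acquires mMutex) can safely be called from inside the loop. A minimal standalone sketch of that copy-then-invoke pattern, using hypothetical names unrelated to the statsd classes:

    #include <cstdint>
    #include <functional>
    #include <map>
    #include <mutex>

    // Sketch only: snapshot callbacks under the lock, invoke them unlocked so a
    // callback that re-enters this object (e.g. to unregister itself) cannot
    // deadlock on mMutex.
    class Notifier {
    public:
        void Register(int id, std::function<void(int64_t)> cb) {
            std::lock_guard<std::mutex> lock(mMutex);
            mCallbacks[id] = std::move(cb);
        }

        void Notify(int64_t value) {
            std::map<int, std::function<void(int64_t)>> snapshot;
            {
                std::lock_guard<std::mutex> lock(mMutex);
                snapshot = mCallbacks;  // copy while protected
            }
            for (const auto& [id, cb] : snapshot) {
                cb(value);  // safe to take mMutex again inside the callback
            }
        }

    private:
        std::mutex mMutex;
        std::map<int, std::function<void(int64_t)>> mCallbacks;
    };
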
diff --git a/statsd/src/config/ConfigManager.h b/statsd/src/config/ConfigManager.h
index 9a0f504..dd7d5bb 100644
--- a/statsd/src/config/ConfigManager.h
+++ b/statsd/src/config/ConfigManager.h
@@ -16,14 +16,15 @@
#pragma once
-#include "config/ConfigKey.h"
-#include "config/ConfigListener.h"
-
#include <aidl/android/os/IPendingIntentRef.h>
+#include <stdio.h>
+
#include <mutex>
#include <string>
-#include <stdio.h>
+#include "config/ConfigKey.h"
+#include "config/ConfigKeyWithPackage.h"
+#include "config/ConfigListener.h"
using aidl::android::os::IPendingIntentRef;
using std::shared_ptr;
@@ -113,6 +114,27 @@
const shared_ptr<IPendingIntentRef>& pir);
/**
+     * Sets the pending intent that is notified whenever the list of restricted metrics changes.
+ */
+ void SetRestrictedMetricsChangedReceiver(const string& configPackage, const int64_t configId,
+ const int32_t callingUid,
+ const shared_ptr<IPendingIntentRef>& pir);
+
+ /**
+ * Erase any restricted metrics changed pending intents associated with this config key & uid.
+ */
+ void RemoveRestrictedMetricsChangedReceiver(const string& configPackage, const int64_t configId,
+ const int32_t callingUid);
+
+ /**
+     * Sends a restricted metrics changed broadcast to the receivers registered for the matching
+     * config keys and delegate uids.
+ */
+ void SendRestrictedMetricsBroadcast(const std::set<string>& configPackages,
+ const int64_t configId,
+ const std::set<int32_t>& delegateUids,
+ const std::vector<int64_t>& metricIds);
+
+ /**
* A configuration was removed.
*
* Reports this to listeners.
@@ -166,9 +188,24 @@
std::map<int, shared_ptr<IPendingIntentRef>> mActiveConfigsChangedReceivers;
/**
+     * Each uid can register at most one receiver per config to be notified when that config's
+     * restricted metrics change. The receiver is specified as an IPendingIntentRef.
+ */
+ std::map<ConfigKeyWithPackage, std::map<int32_t, shared_ptr<IPendingIntentRef>>>
+ mRestrictedMetricsChangedReceivers;
+
+ /**
* The ConfigListeners that will be told about changes.
*/
std::vector<sp<ConfigListener>> mListeners;
+
+ /**
+     * Erase the restricted metrics changed pending intent associated with this config key & uid
+     * if it is equal to the provided pending intent.
+ */
+ void RemoveRestrictedMetricsChangedReceiver(const ConfigKeyWithPackage& key,
+ const int32_t delegateUid,
+ const shared_ptr<IPendingIntentRef>& pir);
};
} // namespace statsd
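
For orientation, a plausible call sequence against the receiver API declared above might look like the sketch below; the package name, config id, uids and metric ids are invented, getPendingIntentRefFromClient() is an assumed helper, and the real callers live in the StatsService binder layer:

    // Hypothetical usage of the ConfigManager restricted-metrics receiver API.
    sp<ConfigManager> configManager = new ConfigManager();
    shared_ptr<IPendingIntentRef> pir = getPendingIntentRefFromClient();  // assumed helper

    // Register a receiver for config ("com.example.app", 1234) on behalf of uid 10001.
    configManager->SetRestrictedMetricsChangedReceiver("com.example.app", 1234, 10001, pir);

    // Notify every registered delegate uid that the restricted metric ids changed.
    configManager->SendRestrictedMetricsBroadcast({"com.example.app"}, 1234,
                                                  /*delegateUids=*/{10001},
                                                  /*metricIds=*/{111, 222});

    // Unregister when the client goes away.
    configManager->RemoveRestrictedMetricsChangedReceiver("com.example.app", 1234, 10001);
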
diff --git a/statsd/src/external/PullDataReceiver.h b/statsd/src/external/PullDataReceiver.h
index 8e3a8a1..47e1377 100644
--- a/statsd/src/external/PullDataReceiver.h
+++ b/statsd/src/external/PullDataReceiver.h
@@ -23,17 +23,22 @@
namespace os {
namespace statsd {
+// Indicates whether a pull was needed and, if so, whether it succeeded.
+enum PullResult { PULL_RESULT_SUCCESS = 1, PULL_RESULT_FAIL = 2, PULL_NOT_NEEDED = 3 };
+
class PullDataReceiver : virtual public RefBase{
public:
virtual ~PullDataReceiver() {}
/**
* @param data The pulled data.
- * @param pullSuccess Whether the pull succeeded. If the pull does not succeed, the data for the
- * bucket should be invalidated.
+ * @param pullResult Whether the pull succeeded and was needed. If the pull does not succeed,
+ * the data for the bucket should be invalidated.
* @param originalPullTimeNs This is when all the pulls have been initiated (elapsed time).
*/
- virtual void onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& data, bool pullSuccess,
- int64_t originalPullTimeNs) = 0;
+ virtual void onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& data,
+ PullResult pullResult, int64_t originalPullTimeNs) = 0;
+
+ virtual bool isPullNeeded() const = 0;
};
} // namespace statsd
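
A receiver implementing the updated interface might look roughly like the sketch below; it is not one of the real metric producers, only an illustration of how PullResult and isPullNeeded() are meant to interact:

    // Sketch of a PullDataReceiver that only asks for pulls while it is active.
    class SampleReceiver : public PullDataReceiver {
    public:
        bool isPullNeeded() const override {
            return mActive;  // the puller manager skips the pull when this returns false
        }

        void onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& data,
                          PullResult pullResult, int64_t originalPullTimeNs) override {
            switch (pullResult) {
                case PullResult::PULL_RESULT_FAIL:
                    // Invalidate the current bucket, as required by the interface contract.
                    break;
                case PullResult::PULL_NOT_NEEDED:
                    // The bucket boundary advanced without data; nothing to aggregate.
                    break;
                case PullResult::PULL_RESULT_SUCCESS:
                    // Consume `data` for the bucket ending near originalPullTimeNs.
                    break;
            }
        }

    private:
        bool mActive = false;
    };
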
diff --git a/statsd/src/external/StatsPullerManager.cpp b/statsd/src/external/StatsPullerManager.cpp
index 90e1cd0..8502d4f 100644
--- a/statsd/src/external/StatsPullerManager.cpp
+++ b/statsd/src/external/StatsPullerManager.cpp
@@ -227,12 +227,22 @@
vector<ReceiverInfo*> receivers;
if (pair.second.size() != 0) {
for (ReceiverInfo& receiverInfo : pair.second) {
- if (receiverInfo.nextPullTimeNs <= elapsedTimeNs) {
+                // If the pull is necessary and enough time has passed for the next bucket, add
+                // the receiver to the list that will pull on this alarm.
+                // Otherwise, check whether the next pull time needs to be advanced.
+ sp<PullDataReceiver> receiverPtr = receiverInfo.receiver.promote();
+ const bool pullNecessary = receiverPtr != nullptr && receiverPtr->isPullNeeded();
+ if (receiverInfo.nextPullTimeNs <= elapsedTimeNs && pullNecessary) {
receivers.push_back(&receiverInfo);
} else {
- if (receiverInfo.nextPullTimeNs < minNextPullTimeNs) {
- minNextPullTimeNs = receiverInfo.nextPullTimeNs;
+ if (receiverInfo.nextPullTimeNs <= elapsedTimeNs) {
+ receiverPtr->onDataPulled({}, PullResult::PULL_NOT_NEEDED, elapsedTimeNs);
+ int numBucketsAhead = (elapsedTimeNs - receiverInfo.nextPullTimeNs) /
+ receiverInfo.intervalNs;
+ receiverInfo.nextPullTimeNs +=
+ (numBucketsAhead + 1) * receiverInfo.intervalNs;
}
+ minNextPullTimeNs = min(receiverInfo.nextPullTimeNs, minNextPullTimeNs);
}
}
if (receivers.size() > 0) {
@@ -242,9 +252,11 @@
}
for (const auto& pullInfo : needToPull) {
vector<shared_ptr<LogEvent>> data;
- bool pullSuccess = PullLocked(pullInfo.first->atomTag, pullInfo.first->configKey,
- elapsedTimeNs, &data);
- if (!pullSuccess) {
+ PullResult pullResult =
+ PullLocked(pullInfo.first->atomTag, pullInfo.first->configKey, elapsedTimeNs, &data)
+ ? PullResult::PULL_RESULT_SUCCESS
+ : PullResult::PULL_RESULT_FAIL;
+ if (pullResult == PullResult::PULL_RESULT_FAIL) {
VLOG("pull failed at %lld, will try again later", (long long)elapsedTimeNs);
}
@@ -263,14 +275,12 @@
for (const auto& receiverInfo : pullInfo.second) {
sp<PullDataReceiver> receiverPtr = receiverInfo->receiver.promote();
if (receiverPtr != nullptr) {
- receiverPtr->onDataPulled(data, pullSuccess, elapsedTimeNs);
+ receiverPtr->onDataPulled(data, pullResult, elapsedTimeNs);
// We may have just come out of a coma, compute next pull time.
int numBucketsAhead =
(elapsedTimeNs - receiverInfo->nextPullTimeNs) / receiverInfo->intervalNs;
receiverInfo->nextPullTimeNs += (numBucketsAhead + 1) * receiverInfo->intervalNs;
- if (receiverInfo->nextPullTimeNs < minNextPullTimeNs) {
- minNextPullTimeNs = receiverInfo->nextPullTimeNs;
- }
+ minNextPullTimeNs = min(receiverInfo->nextPullTimeNs, minNextPullTimeNs);
} else {
VLOG("receiver already gone.");
}
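
The catch-up arithmetic above ("we may have just come out of a coma") advances nextPullTimeNs past the current time in whole bucket intervals. A worked example with made-up values:

    // All values hypothetical; the nanoseconds-per-second constant is spelled out for clarity.
    constexpr int64_t kNsPerSec = 1000000000LL;
    int64_t intervalNs = 60 * kNsPerSec;        // one-minute pull interval
    int64_t nextPullTimeNs = 100 * kNsPerSec;   // the pull that was originally scheduled
    int64_t elapsedTimeNs = 345 * kNsPerSec;    // the alarm fired 245s late

    int64_t numBucketsAhead = (elapsedTimeNs - nextPullTimeNs) / intervalNs;  // (345-100)/60 = 4
    nextPullTimeNs += (numBucketsAhead + 1) * intervalNs;                     // 100 + 5*60 = 400s
    // The next pull lands at 400s, the first bucket boundary strictly after 345s.
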
diff --git a/statsd/src/external/StatsPullerManager.h b/statsd/src/external/StatsPullerManager.h
index b503aa8..80a1331 100644
--- a/statsd/src/external/StatsPullerManager.h
+++ b/statsd/src/external/StatsPullerManager.h
@@ -164,6 +164,8 @@
int64_t mNextPullTimeNs;
+ FRIEND_TEST(GaugeMetricE2ePulledTest, TestFirstNSamplesPulledNoTrigger);
+ FRIEND_TEST(GaugeMetricE2ePulledTest, TestFirstNSamplesPulledNoTriggerWithActivation);
FRIEND_TEST(GaugeMetricE2ePulledTest, TestRandomSamplePulledEvents);
FRIEND_TEST(GaugeMetricE2ePulledTest, TestRandomSamplePulledEvent_LateAlarm);
FRIEND_TEST(GaugeMetricE2ePulledTest, TestRandomSamplePulledEventsWithActivation);
diff --git a/statsd/src/flags/FlagProvider.h b/statsd/src/flags/FlagProvider.h
index 4180e88..8f24ac9 100644
--- a/statsd/src/flags/FlagProvider.h
+++ b/statsd/src/flags/FlagProvider.h
@@ -37,6 +37,9 @@
const std::string STATSD_NATIVE_NAMESPACE = "statsd_native";
const std::string STATSD_NATIVE_BOOT_NAMESPACE = "statsd_native_boot";
+const std::string OPTIMIZATION_SOCKET_PARSING_FLAG = "optimization_socket_parsing";
+const std::string STATSD_INIT_COMPLETED_NO_DELAY_FLAG = "statsd_init_completed_no_delay";
+
const std::string FLAG_TRUE = "true";
const std::string FLAG_FALSE = "false";
const std::string FLAG_EMPTY = "";
@@ -101,6 +104,7 @@
friend class ConfigUpdateE2eTest;
friend class ConfigUpdateTest;
friend class EventMetricE2eTest;
+ friend class ValueMetricE2eTest;
friend class GaugeMetricE2ePulledTest;
friend class GaugeMetricE2ePushedTest;
friend class EventMetricProducerTest;
@@ -109,11 +113,16 @@
friend class FlagProviderTest_SPlus_RealValues;
friend class KllMetricE2eAbTest;
friend class MetricsManagerTest;
- friend class PartialBucketE2e_AppUpgradeDefaultTest;
+ friend class StatsLogProcessorTest;
+ friend class StatsLogProcessorTestRestricted;
+ friend class RestrictedEventMetricProducerTest;
+ friend class RestrictedConfigE2ETest;
+ friend class RestrictedEventMetricE2eTest;
+ friend class LogEvent_FieldRestrictionTest;
- FRIEND_TEST(ConfigUpdateE2eTest, TestKllMetric_KllDisabledBeforeConfigUpdate);
FRIEND_TEST(ConfigUpdateE2eTest, TestEventMetric);
FRIEND_TEST(ConfigUpdateE2eTest, TestGaugeMetric);
+ FRIEND_TEST(ConfigUpdateE2eTest, TestConfigUpdateRestrictedDelegateCleared);
FRIEND_TEST(EventMetricE2eTest, TestEventMetricDataAggregated);
FRIEND_TEST(EventMetricProducerTest, TestOneAtomTagAggregatedEvents);
FRIEND_TEST(EventMetricProducerTest, TestTwoAtomTagAggregatedEvents);
@@ -125,9 +134,15 @@
FRIEND_TEST(FlagProviderTest_SPlus, TestGetFlagBoolServerFlagEmptyDefaultTrue);
FRIEND_TEST(FlagProviderTest_SPlus_RealValues, TestGetBootFlagBoolServerFlagTrue);
FRIEND_TEST(FlagProviderTest_SPlus_RealValues, TestGetBootFlagBoolServerFlagFalse);
- FRIEND_TEST(NumericValueMetricProducerTest_SubsetDimensions, TestSubsetDimensions);
- FRIEND_TEST(PartialBucketE2e_AppUpgradeDefaultTest, TestCountMetricDefaultFalse);
- FRIEND_TEST(PartialBucketE2e_AppUpgradeDefaultTest, TestCountMetricDefaultTrue);
+ FRIEND_TEST(MetricsManagerTest_SPlus, TestRestrictedMetricsConfig);
+ FRIEND_TEST(RestrictedEventMetricE2eTest, TestFlagDisabled);
+ FRIEND_TEST(LogEventTest, TestRestrictionCategoryAnnotation);
+ FRIEND_TEST(LogEventTest, TestInvalidRestrictionCategoryAnnotation);
+ FRIEND_TEST(LogEventTest, TestRestrictionCategoryAnnotationFlagDisabled);
+ FRIEND_TEST(LogEvent_FieldRestrictionTest, TestFieldRestrictionAnnotation);
+ FRIEND_TEST(LogEvent_FieldRestrictionTest, TestInvalidAnnotationIntType);
+ FRIEND_TEST(LogEvent_FieldRestrictionTest, TestInvalidAnnotationAtomLevel);
+ FRIEND_TEST(LogEvent_FieldRestrictionTest, TestRestrictionCategoryAnnotationFlagDisabled);
};
} // namespace statsd
diff --git a/statsd/src/guardrail/StatsdStats.cpp b/statsd/src/guardrail/StatsdStats.cpp
index 0e66019..7dd6d9a 100644
--- a/statsd/src/guardrail/StatsdStats.cpp
+++ b/statsd/src/guardrail/StatsdStats.cpp
@@ -19,9 +19,11 @@
#include "StatsdStats.h"
#include <android/util/ProtoOutputStream.h>
+
#include "../stats_log_util.h"
#include "statslog_statsd.h"
#include "storage/StorageManager.h"
+#include "utils/ShardOffsetProvider.h"
namespace android {
namespace os {
@@ -29,11 +31,13 @@
using android::util::FIELD_COUNT_REPEATED;
using android::util::FIELD_TYPE_BOOL;
+using android::util::FIELD_TYPE_ENUM;
using android::util::FIELD_TYPE_FLOAT;
using android::util::FIELD_TYPE_INT32;
using android::util::FIELD_TYPE_INT64;
using android::util::FIELD_TYPE_MESSAGE;
using android::util::FIELD_TYPE_STRING;
+using android::util::FIELD_TYPE_UINT32;
using android::util::ProtoOutputStream;
using std::lock_guard;
using std::shared_ptr;
@@ -52,10 +56,24 @@
const int FIELD_ID_LOGGER_ERROR_STATS = 16;
const int FIELD_ID_OVERFLOW = 18;
const int FIELD_ID_ACTIVATION_BROADCAST_GUARDRAIL = 19;
+const int FIELD_ID_RESTRICTED_METRIC_QUERY_STATS = 20;
+const int FIELD_ID_SHARD_OFFSET = 21;
+
+const int FIELD_ID_RESTRICTED_METRIC_QUERY_STATS_CALLING_UID = 1;
+const int FIELD_ID_RESTRICTED_METRIC_QUERY_STATS_CONFIG_ID = 2;
+const int FIELD_ID_RESTRICTED_METRIC_QUERY_STATS_CONFIG_UID = 3;
+const int FIELD_ID_RESTRICTED_METRIC_QUERY_STATS_CONFIG_PACKAGE = 4;
+const int FIELD_ID_RESTRICTED_METRIC_QUERY_STATS_INVALID_QUERY_REASON = 5;
+const int FIELD_ID_RESTRICTED_METRIC_QUERY_STATS_QUERY_WALL_TIME_NS = 6;
+const int FIELD_ID_RESTRICTED_METRIC_QUERY_STATS_HAS_ERROR = 7;
+const int FIELD_ID_RESTRICTED_METRIC_QUERY_STATS_ERROR = 8;
+const int FIELD_ID_RESTRICTED_METRIC_QUERY_STATS_LATENCY_NS = 9;
const int FIELD_ID_ATOM_STATS_TAG = 1;
const int FIELD_ID_ATOM_STATS_COUNT = 2;
const int FIELD_ID_ATOM_STATS_ERROR_COUNT = 3;
+const int FIELD_ID_ATOM_STATS_DROPS_COUNT = 4;
+const int FIELD_ID_ATOM_STATS_SKIP_COUNT = 5;
const int FIELD_ID_ANOMALY_ALARMS_REGISTERED = 1;
const int FIELD_ID_PERIODIC_ALARMS_REGISTERED = 1;
@@ -81,6 +99,7 @@
const int FIELD_ID_CONFIG_STATS_MATCHER_COUNT = 7;
const int FIELD_ID_CONFIG_STATS_ALERT_COUNT = 8;
const int FIELD_ID_CONFIG_STATS_VALID = 9;
+const int FIELD_ID_CONFIG_STATS_INVALID_CONFIG_REASON = 24;
const int FIELD_ID_CONFIG_STATS_BROADCAST = 10;
const int FIELD_ID_CONFIG_STATS_DATA_DROP_TIME = 11;
const int FIELD_ID_CONFIG_STATS_DATA_DROP_BYTES = 21;
@@ -96,6 +115,21 @@
const int FIELD_ID_CONFIG_STATS_DEACTIVATION = 23;
const int FIELD_ID_CONFIG_STATS_ANNOTATION_INT64 = 1;
const int FIELD_ID_CONFIG_STATS_ANNOTATION_INT32 = 2;
+const int FIELD_ID_CONFIG_STATS_RESTRICTED_METRIC_STATS = 25;
+const int FIELD_ID_CONFIG_STATS_DEVICE_INFO_TABLE_CREATION_FAILED = 26;
+const int FIELD_ID_CONFIG_STATS_RESTRICTED_DB_CORRUPTED_COUNT = 27;
+const int FIELD_ID_CONFIG_STATS_RESTRICTED_CONFIG_FLUSH_LATENCY = 28;
+const int FIELD_ID_CONFIG_STATS_RESTRICTED_CONFIG_DB_SIZE_TIME_SEC = 29;
+const int FIELD_ID_CONFIG_STATS_RESTRICTED_CONFIG_DB_SIZE_BYTES = 30;
+
+const int FIELD_ID_INVALID_CONFIG_REASON_ENUM = 1;
+const int FIELD_ID_INVALID_CONFIG_REASON_METRIC_ID = 2;
+const int FIELD_ID_INVALID_CONFIG_REASON_STATE_ID = 3;
+const int FIELD_ID_INVALID_CONFIG_REASON_ALERT_ID = 4;
+const int FIELD_ID_INVALID_CONFIG_REASON_ALARM_ID = 5;
+const int FIELD_ID_INVALID_CONFIG_REASON_SUBSCRIPTION_ID = 6;
+const int FIELD_ID_INVALID_CONFIG_REASON_MATCHER_ID = 7;
+const int FIELD_ID_INVALID_CONFIG_REASON_CONDITION_ID = 8;
const int FIELD_ID_MATCHER_STATS_ID = 1;
const int FIELD_ID_MATCHER_STATS_COUNT = 2;
@@ -114,6 +148,14 @@
const int FIELD_ID_ACTIVATION_BROADCAST_GUARDRAIL_UID = 1;
const int FIELD_ID_ACTIVATION_BROADCAST_GUARDRAIL_TIME = 2;
+// for RestrictedMetricStats proto
+const int FIELD_ID_RESTRICTED_STATS_METRIC_ID = 1;
+const int FIELD_ID_RESTRICTED_STATS_INSERT_ERROR = 2;
+const int FIELD_ID_RESTRICTED_STATS_TABLE_CREATION_ERROR = 3;
+const int FIELD_ID_RESTRICTED_STATS_TABLE_DELETION_ERROR = 4;
+const int FIELD_ID_RESTRICTED_STATS_FLUSH_LATENCY = 5;
+const int FIELD_ID_RESTRICTED_STATS_CATEGORY_CHANGED_COUNT = 6;
+
const std::map<int, std::pair<size_t, size_t>> StatsdStats::kAtomDimensionKeySizeLimitMap = {
{util::BINDER_CALLS, {6000, 10000}},
{util::LOOPER_STATS, {1500, 2500}},
@@ -141,7 +183,7 @@
void StatsdStats::noteConfigReceived(
const ConfigKey& key, int metricsCount, int conditionsCount, int matchersCount,
int alertsCount, const std::list<std::pair<const int64_t, const int32_t>>& annotations,
- bool isValid) {
+ const optional<InvalidConfigReason>& reason) {
lock_guard<std::mutex> lock(mLock);
int32_t nowTimeSec = getWallClockSec();
@@ -156,12 +198,13 @@
configStats->condition_count = conditionsCount;
configStats->matcher_count = matchersCount;
configStats->alert_count = alertsCount;
- configStats->is_valid = isValid;
+ configStats->is_valid = !reason.has_value();
+ configStats->reason = reason;
for (auto& v : annotations) {
configStats->annotations.emplace_back(v);
}
- if (isValid) {
+ if (!reason.has_value()) {
mConfigStats[key] = configStats;
} else {
configStats->deletion_time_sec = nowTimeSec;
@@ -258,7 +301,8 @@
noteDataDropped(key, totalBytes, getWallClockSec());
}
-void StatsdStats::noteEventQueueOverflow(int64_t oldestEventTimestampNs) {
+void StatsdStats::noteEventQueueOverflow(int64_t oldestEventTimestampNs, int32_t atomId,
+ bool isSkipped) {
lock_guard<std::mutex> lock(mLock);
mOverflowCount++;
@@ -272,6 +316,17 @@
if (history < mMinQueueHistoryNs) {
mMinQueueHistoryNs = history;
}
+
+ noteAtomLoggedLocked(atomId, isSkipped);
+ noteAtomDroppedLocked(atomId);
+}
+
+void StatsdStats::noteAtomDroppedLocked(int32_t atomId) {
+ constexpr int kMaxPushedAtomDroppedStatsSize = kMaxPushedAtomId + kMaxNonPlatformPushedAtoms;
+ if (mPushedAtomDropsStats.size() < kMaxPushedAtomDroppedStatsSize ||
+ mPushedAtomDropsStats.find(atomId) != mPushedAtomDropsStats.end()) {
+ mPushedAtomDropsStats[atomId]++;
+ }
}
void StatsdStats::noteDataDropped(const ConfigKey& key, const size_t totalBytes, int32_t timeSec) {
@@ -307,6 +362,26 @@
it->second->dump_report_stats.push_back(std::make_pair(timeSec, num_bytes));
}
+void StatsdStats::noteDeviceInfoTableCreationFailed(const ConfigKey& key) {
+ lock_guard<std::mutex> lock(mLock);
+ auto it = mConfigStats.find(key);
+ if (it == mConfigStats.end()) {
+ ALOGE("Config key %s not found!", key.ToString().c_str());
+ return;
+ }
+ it->second->device_info_table_creation_failed = true;
+}
+
+void StatsdStats::noteDbCorrupted(const ConfigKey& key) {
+ lock_guard<std::mutex> lock(mLock);
+ auto it = mConfigStats.find(key);
+ if (it == mConfigStats.end()) {
+ ALOGE("Config key %s not found!", key.ToString().c_str());
+ return;
+ }
+ it->second->db_corrupted_count++;
+}
+
void StatsdStats::noteUidMapDropped(int deltas) {
lock_guard<std::mutex> lock(mLock);
mUidMapStats.dropped_changes += mUidMapStats.dropped_changes + deltas;
@@ -456,17 +531,24 @@
mPulledAtomStats[pullAtomId].pullExceedMaxDelay++;
}
-void StatsdStats::noteAtomLogged(int atomId, int32_t timeSec) {
+void StatsdStats::noteAtomLogged(int atomId, int32_t /*timeSec*/, bool isSkipped) {
lock_guard<std::mutex> lock(mLock);
+ noteAtomLoggedLocked(atomId, isSkipped);
+}
+
+void StatsdStats::noteAtomLoggedLocked(int atomId, bool isSkipped) {
if (atomId >= 0 && atomId <= kMaxPushedAtomId) {
- mPushedAtomStats[atomId]++;
+ mPushedAtomStats[atomId].logCount++;
+ mPushedAtomStats[atomId].skipCount += isSkipped;
} else {
if (atomId < 0) {
android_errorWriteLog(0x534e4554, "187957589");
}
- if (mNonPlatformPushedAtomStats.size() < kMaxNonPlatformPushedAtoms) {
- mNonPlatformPushedAtomStats[atomId]++;
+ if (mNonPlatformPushedAtomStats.size() < kMaxNonPlatformPushedAtoms ||
+ mNonPlatformPushedAtomStats.find(atomId) != mNonPlatformPushedAtomStats.end()) {
+ mNonPlatformPushedAtomStats[atomId].logCount++;
+ mNonPlatformPushedAtomStats[atomId].skipCount += isSkipped;
}
}
}
@@ -582,6 +664,137 @@
}
}
+void StatsdStats::noteQueryRestrictedMetricSucceed(const int64_t configId,
+ const string& configPackage,
+ const std::optional<int32_t> configUid,
+ const int32_t callingUid,
+ const int64_t latencyNs) {
+ lock_guard<std::mutex> lock(mLock);
+
+ if (mRestrictedMetricQueryStats.size() == kMaxRestrictedMetricQueryCount) {
+ mRestrictedMetricQueryStats.pop_front();
+ }
+ mRestrictedMetricQueryStats.emplace_back(RestrictedMetricQueryStats(
+ callingUid, configId, configPackage, configUid, getWallClockNs(),
+ /*invalidQueryReason=*/std::nullopt, /*error=*/"", latencyNs));
+}
+
+void StatsdStats::noteQueryRestrictedMetricFailed(const int64_t configId,
+ const string& configPackage,
+ const std::optional<int32_t> configUid,
+ const int32_t callingUid,
+ const InvalidQueryReason reason) {
+ lock_guard<std::mutex> lock(mLock);
+ noteQueryRestrictedMetricFailedLocked(configId, configPackage, configUid, callingUid, reason,
+ /*error=*/"");
+}
+
+void StatsdStats::noteQueryRestrictedMetricFailed(
+ const int64_t configId, const string& configPackage, const std::optional<int32_t> configUid,
+ const int32_t callingUid, const InvalidQueryReason reason, const string& error) {
+ lock_guard<std::mutex> lock(mLock);
+ noteQueryRestrictedMetricFailedLocked(configId, configPackage, configUid, callingUid, reason,
+ error);
+}
+
+void StatsdStats::noteQueryRestrictedMetricFailedLocked(
+ const int64_t configId, const string& configPackage, const std::optional<int32_t> configUid,
+ const int32_t callingUid, const InvalidQueryReason reason, const string& error) {
+ if (mRestrictedMetricQueryStats.size() == kMaxRestrictedMetricQueryCount) {
+ mRestrictedMetricQueryStats.pop_front();
+ }
+ mRestrictedMetricQueryStats.emplace_back(RestrictedMetricQueryStats(
+ callingUid, configId, configPackage, configUid, getWallClockNs(), reason, error,
+ /*queryLatencyNs=*/std::nullopt));
+}
+
+void StatsdStats::noteRestrictedMetricInsertError(const ConfigKey& configKey,
+ const int64_t metricId) {
+ lock_guard<std::mutex> lock(mLock);
+ auto it = mConfigStats.find(configKey);
+ if (it != mConfigStats.end()) {
+ it->second->restricted_metric_stats[metricId].insertError++;
+ }
+}
+
+void StatsdStats::noteRestrictedMetricTableCreationError(const ConfigKey& configKey,
+ const int64_t metricId) {
+ lock_guard<std::mutex> lock(mLock);
+ auto it = mConfigStats.find(configKey);
+ if (it != mConfigStats.end()) {
+ it->second->restricted_metric_stats[metricId].tableCreationError++;
+ }
+}
+
+void StatsdStats::noteRestrictedMetricTableDeletionError(const ConfigKey& configKey,
+ const int64_t metricId) {
+ lock_guard<std::mutex> lock(mLock);
+ auto it = mConfigStats.find(configKey);
+ if (it != mConfigStats.end()) {
+ it->second->restricted_metric_stats[metricId].tableDeletionError++;
+ }
+}
+
+void StatsdStats::noteRestrictedMetricFlushLatency(const ConfigKey& configKey,
+ const int64_t metricId,
+ const int64_t flushLatencyNs) {
+ lock_guard<std::mutex> lock(mLock);
+ auto it = mConfigStats.find(configKey);
+ if (it == mConfigStats.end()) {
+ ALOGE("Config key %s not found!", configKey.ToString().c_str());
+ return;
+ }
+ auto& restrictedMetricStats = it->second->restricted_metric_stats[metricId];
+ if (restrictedMetricStats.flushLatencyNs.size() == kMaxRestrictedMetricFlushLatencyCount) {
+ restrictedMetricStats.flushLatencyNs.pop_front();
+ }
+ restrictedMetricStats.flushLatencyNs.push_back(flushLatencyNs);
+}
+
+void StatsdStats::noteRestrictedConfigFlushLatency(const ConfigKey& configKey,
+ const int64_t totalFlushLatencyNs) {
+ lock_guard<std::mutex> lock(mLock);
+ auto it = mConfigStats.find(configKey);
+ if (it == mConfigStats.end()) {
+ ALOGE("Config key %s not found!", configKey.ToString().c_str());
+ return;
+ }
+ std::list<int64_t>& totalFlushLatencies = it->second->total_flush_latency_ns;
+ if (totalFlushLatencies.size() == kMaxRestrictedConfigFlushLatencyCount) {
+ totalFlushLatencies.pop_front();
+ }
+ totalFlushLatencies.push_back(totalFlushLatencyNs);
+}
+
+void StatsdStats::noteRestrictedConfigDbSize(const ConfigKey& configKey,
+ const int64_t elapsedTimeNs, const int64_t dbSize) {
+ lock_guard<std::mutex> lock(mLock);
+ auto it = mConfigStats.find(configKey);
+ if (it == mConfigStats.end()) {
+ ALOGE("Config key %s not found!", configKey.ToString().c_str());
+ return;
+ }
+ std::list<int64_t>& totalDbSizeTimestamps = it->second->total_db_size_timestamps;
+    std::list<int64_t>& totalDbSizes = it->second->total_db_sizes;
+    if (totalDbSizeTimestamps.size() == kMaxRestrictedConfigDbSizeCount) {
+        totalDbSizeTimestamps.pop_front();
+        totalDbSizes.pop_front();
+    }
+    totalDbSizeTimestamps.push_back(elapsedTimeNs);
+    totalDbSizes.push_back(dbSize);
+}
+
+void StatsdStats::noteRestrictedMetricCategoryChanged(const ConfigKey& configKey,
+ const int64_t metricId) {
+ lock_guard<std::mutex> lock(mLock);
+ auto it = mConfigStats.find(configKey);
+ if (it == mConfigStats.end()) {
+ ALOGE("Config key %s not found!", configKey.ToString().c_str());
+ return;
+ }
+ it->second->restricted_metric_stats[metricId].categoryChangedCount++;
+}
+
StatsdStats::AtomMetricStats& StatsdStats::getAtomMetricStats(int64_t metricId) {
auto atomMetricStatsIter = mAtomMetricStats.find(metricId);
if (atomMetricStatsIter != mAtomMetricStats.end()) {
@@ -600,7 +813,7 @@
// Reset the historical data, but keep the active ConfigStats
mStartTimeSec = getWallClockSec();
mIceBox.clear();
- std::fill(mPushedAtomStats.begin(), mPushedAtomStats.end(), 0);
+ std::fill(mPushedAtomStats.begin(), mPushedAtomStats.end(), PushedAtomStats());
mNonPlatformPushedAtomStats.clear();
mAnomalyAlarmRegisteredStats = 0;
mPeriodicAlarmRegisteredStats = 0;
@@ -622,6 +835,11 @@
config.second->metric_stats.clear();
config.second->metric_dimension_in_condition_stats.clear();
config.second->alert_stats.clear();
+ config.second->restricted_metric_stats.clear();
+ config.second->db_corrupted_count = 0;
+ config.second->total_flush_latency_ns.clear();
+ config.second->total_db_size_timestamps.clear();
+ config.second->total_db_sizes.clear();
}
for (auto& pullStats : mPulledAtomStats) {
pullStats.second.totalPull = 0;
@@ -648,6 +866,8 @@
mAtomMetricStats.clear();
mActivationBroadcastGuardrailStats.clear();
mPushedAtomErrorStats.clear();
+ mPushedAtomDropsStats.clear();
+ mRestrictedMetricQueryStats.clear();
}
string buildTimeString(int64_t timeSec) {
@@ -658,7 +878,7 @@
return string(timeBuffer);
}
-int StatsdStats::getPushedAtomErrors(int atomId) const {
+int StatsdStats::getPushedAtomErrorsLocked(int atomId) const {
const auto& it = mPushedAtomErrorStats.find(atomId);
if (it != mPushedAtomErrorStats.end()) {
return it->second;
@@ -667,6 +887,15 @@
}
}
+int StatsdStats::getPushedAtomDropsLocked(int atomId) const {
+ const auto& it = mPushedAtomDropsStats.find(atomId);
+ if (it != mPushedAtomDropsStats.end()) {
+ return it->second;
+ } else {
+ return 0;
+ }
+}
+
void StatsdStats::dumpStats(int out) const {
lock_guard<std::mutex> lock(mLock);
time_t t = mStartTimeSec;
@@ -678,11 +907,18 @@
for (const auto& configStats : mIceBox) {
dprintf(out,
"Config {%d_%lld}: creation=%d, deletion=%d, reset=%d, #metric=%d, #condition=%d, "
- "#matcher=%d, #alert=%d, valid=%d\n",
+ "#matcher=%d, #alert=%d, valid=%d, device_info_table_creation_failed=%d, "
+ "db_corrupted_count=%d\n",
configStats->uid, (long long)configStats->id, configStats->creation_time_sec,
configStats->deletion_time_sec, configStats->reset_time_sec,
configStats->metric_count, configStats->condition_count, configStats->matcher_count,
- configStats->alert_count, configStats->is_valid);
+ configStats->alert_count, configStats->is_valid,
+ configStats->device_info_table_creation_failed, configStats->db_corrupted_count);
+
+ if (!configStats->is_valid) {
+ dprintf(out, "\tinvalid config reason: %s\n",
+ InvalidConfigReasonEnum_Name(configStats->reason->reason).c_str());
+ }
for (const auto& broadcastTime : configStats->broadcast_sent_time_sec) {
dprintf(out, "\tbroadcast time: %d\n", broadcastTime);
@@ -703,17 +939,33 @@
dprintf(out, "\tdata drop time: %d with size %lld", *dropTimePtr,
(long long)*dropBytesPtr);
}
+
+ for (const int64_t flushLatency : configStats->total_flush_latency_ns) {
+ dprintf(out, "\tflush latency time ns: %lld\n", (long long)flushLatency);
+ }
+
+ for (const int64_t dbSize : configStats->total_db_sizes) {
+ dprintf(out, "\tdb size: %lld\n", (long long)dbSize);
+ }
}
dprintf(out, "%lu Active Configs\n", (unsigned long)mConfigStats.size());
for (auto& pair : mConfigStats) {
auto& configStats = pair.second;
dprintf(out,
"Config {%d-%lld}: creation=%d, deletion=%d, #metric=%d, #condition=%d, "
- "#matcher=%d, #alert=%d, valid=%d\n",
+ "#matcher=%d, #alert=%d, valid=%d, device_info_table_creation_failed=%d, "
+ "db_corrupted_count=%d\n",
configStats->uid, (long long)configStats->id, configStats->creation_time_sec,
configStats->deletion_time_sec, configStats->metric_count,
configStats->condition_count, configStats->matcher_count, configStats->alert_count,
- configStats->is_valid);
+ configStats->is_valid, configStats->device_info_table_creation_failed,
+ configStats->db_corrupted_count);
+
+ if (!configStats->is_valid) {
+ dprintf(out, "\tinvalid config reason: %s\n",
+ InvalidConfigReasonEnum_Name(configStats->reason->reason).c_str());
+ }
+
for (const auto& annotation : configStats->annotations) {
dprintf(out, "\tannotation: %lld, %d\n", (long long)annotation.first,
annotation.second);
@@ -764,20 +1016,43 @@
for (const auto& stats : pair.second->alert_stats) {
dprintf(out, "alert %lld declared %d times\n", (long long)stats.first, stats.second);
}
+
+ for (const auto& stats : configStats->restricted_metric_stats) {
+ dprintf(out, "Restricted MetricId %lld: ", (long long)stats.first);
+ dprintf(out, "Insert error %lld, ", (long long)stats.second.insertError);
+ dprintf(out, "Table creation error %lld, ", (long long)stats.second.tableCreationError);
+ dprintf(out, "Table deletion error %lld ", (long long)stats.second.tableDeletionError);
+ dprintf(out, "Category changed count %lld\n ",
+ (long long)stats.second.categoryChangedCount);
+ string flushLatencies = "Flush Latencies: ";
+ for (const int64_t latencyNs : stats.second.flushLatencyNs) {
+ flushLatencies.append(to_string(latencyNs).append(","));
+ }
+ flushLatencies.pop_back();
+ flushLatencies.push_back('\n');
+ dprintf(out, "%s", flushLatencies.c_str());
+ }
+
+ for (const int64_t flushLatency : configStats->total_flush_latency_ns) {
+ dprintf(out, "flush latency time ns: %lld\n", (long long)flushLatency);
+ }
}
dprintf(out, "********Disk Usage stats***********\n");
StorageManager::printStats(out);
dprintf(out, "********Pushed Atom stats***********\n");
const size_t atomCounts = mPushedAtomStats.size();
for (size_t i = 2; i < atomCounts; i++) {
- if (mPushedAtomStats[i] > 0) {
- dprintf(out, "Atom %zu->(total count)%d, (error count)%d\n", i, mPushedAtomStats[i],
- getPushedAtomErrors((int)i));
+ if (mPushedAtomStats[i].logCount > 0) {
+ dprintf(out,
+ "Atom %zu->(total count)%d, (error count)%d, (drop count)%d, (skip count)%d\n",
+ i, mPushedAtomStats[i].logCount, getPushedAtomErrorsLocked((int)i),
+ getPushedAtomDropsLocked((int)i), mPushedAtomStats[i].skipCount);
}
}
for (const auto& pair : mNonPlatformPushedAtomStats) {
- dprintf(out, "Atom %d->(total count)%d, (error count)%d\n", pair.first, pair.second,
- getPushedAtomErrors(pair.first));
+ dprintf(out, "Atom %d->(total count)%d, (error count)%d, (drop count)%d, (skip count)%d\n",
+ pair.first, pair.second.logCount, getPushedAtomErrorsLocked(pair.first),
+ getPushedAtomDropsLocked((int)pair.first), pair.second.skipCount);
}
dprintf(out, "********Pulled Atom stats***********\n");
@@ -804,7 +1079,7 @@
string uptimeMillis = "(pull timeout system uptime millis) ";
string pullTimeoutMillis = "(pull timeout elapsed time millis) ";
for (const auto& stats : pair.second.pullTimeoutMetadata) {
- uptimeMillis.append(to_string(stats.pullTimeoutUptimeMillis)).append(",");;
+ uptimeMillis.append(to_string(stats.pullTimeoutUptimeMillis)).append(",");
pullTimeoutMillis.append(to_string(stats.pullTimeoutElapsedMillis)).append(",");
}
uptimeMillis.pop_back();
@@ -856,6 +1131,30 @@
}
dprintf(out, "\n");
}
+
+ if (mRestrictedMetricQueryStats.size() > 0) {
+ dprintf(out, "********Restricted Metric Query stats***********\n");
+ for (const auto& stat : mRestrictedMetricQueryStats) {
+ if (stat.mHasError) {
+ dprintf(out,
+ "Query with error type: %d - %lld (query time ns), "
+ "%d (calling uid), %lld (config id), %s (config package), %s (error)\n",
+ stat.mInvalidQueryReason.value(), (long long)stat.mQueryWallTimeNs,
+ stat.mCallingUid, (long long)stat.mConfigId, stat.mConfigPackage.c_str(),
+ stat.mError.c_str());
+ } else {
+ dprintf(out,
+ "Query succeed - %lld (query time ns), %d (calling uid), "
+ "%lld (config id), %s (config package), %d (config uid), "
+ "%lld (queryLatencyNs)\n",
+ (long long)stat.mQueryWallTimeNs, stat.mCallingUid,
+ (long long)stat.mConfigId, stat.mConfigPackage.c_str(),
+ stat.mConfigUid.value(), (long long)stat.mQueryLatencyNs.value());
+ }
+ }
+ }
+ dprintf(out, "********Shard Offset Provider stats***********\n");
+ dprintf(out, "Shard Offset: %u\n", ShardOffsetProvider::getInstance().getShardOffset());
}
void addConfigStatsToProto(const ConfigStats& configStats, ProtoOutputStream* proto) {
@@ -878,6 +1177,44 @@
proto->write(FIELD_TYPE_INT32 | FIELD_ID_CONFIG_STATS_ALERT_COUNT, configStats.alert_count);
proto->write(FIELD_TYPE_BOOL | FIELD_ID_CONFIG_STATS_VALID, configStats.is_valid);
+ if (!configStats.is_valid) {
+ uint64_t tmpToken =
+ proto->start(FIELD_TYPE_MESSAGE | FIELD_ID_CONFIG_STATS_INVALID_CONFIG_REASON);
+ proto->write(FIELD_TYPE_ENUM | FIELD_ID_INVALID_CONFIG_REASON_ENUM,
+ configStats.reason->reason);
+ if (configStats.reason->metricId.has_value()) {
+ proto->write(FIELD_TYPE_INT64 | FIELD_ID_INVALID_CONFIG_REASON_METRIC_ID,
+ configStats.reason->metricId.value());
+ }
+ if (configStats.reason->stateId.has_value()) {
+ proto->write(FIELD_TYPE_INT64 | FIELD_ID_INVALID_CONFIG_REASON_STATE_ID,
+ configStats.reason->stateId.value());
+ }
+ if (configStats.reason->alertId.has_value()) {
+ proto->write(FIELD_TYPE_INT64 | FIELD_ID_INVALID_CONFIG_REASON_ALERT_ID,
+ configStats.reason->alertId.value());
+ }
+ if (configStats.reason->alarmId.has_value()) {
+ proto->write(FIELD_TYPE_INT64 | FIELD_ID_INVALID_CONFIG_REASON_ALARM_ID,
+ configStats.reason->alarmId.value());
+ }
+ if (configStats.reason->subscriptionId.has_value()) {
+ proto->write(FIELD_TYPE_INT64 | FIELD_ID_INVALID_CONFIG_REASON_SUBSCRIPTION_ID,
+ configStats.reason->subscriptionId.value());
+ }
+ for (const auto& matcherId : configStats.reason->matcherIds) {
+ proto->write(FIELD_TYPE_INT64 | FIELD_COUNT_REPEATED |
+ FIELD_ID_INVALID_CONFIG_REASON_MATCHER_ID,
+ matcherId);
+ }
+ for (const auto& conditionId : configStats.reason->conditionIds) {
+ proto->write(FIELD_TYPE_INT64 | FIELD_COUNT_REPEATED |
+ FIELD_ID_INVALID_CONFIG_REASON_CONDITION_ID,
+ conditionId);
+ }
+ proto->end(tmpToken);
+ }
+
for (const auto& broadcast : configStats.broadcast_sent_time_sec) {
proto->write(FIELD_TYPE_INT32 | FIELD_ID_CONFIG_STATS_BROADCAST | FIELD_COUNT_REPEATED,
broadcast);
@@ -964,6 +1301,47 @@
proto->end(tmpToken);
}
+ for (const auto& pair : configStats.restricted_metric_stats) {
+ uint64_t token =
+ proto->start(FIELD_TYPE_MESSAGE | FIELD_ID_CONFIG_STATS_RESTRICTED_METRIC_STATS |
+ FIELD_COUNT_REPEATED);
+
+ proto->write(FIELD_TYPE_INT64 | FIELD_ID_RESTRICTED_STATS_METRIC_ID, (long long)pair.first);
+ writeNonZeroStatToStream(FIELD_TYPE_INT64 | FIELD_ID_RESTRICTED_STATS_INSERT_ERROR,
+ (long long)pair.second.insertError, proto);
+ writeNonZeroStatToStream(FIELD_TYPE_INT64 | FIELD_ID_RESTRICTED_STATS_TABLE_CREATION_ERROR,
+ (long long)pair.second.tableCreationError, proto);
+ writeNonZeroStatToStream(FIELD_TYPE_INT64 | FIELD_ID_RESTRICTED_STATS_TABLE_DELETION_ERROR,
+ (long long)pair.second.tableDeletionError, proto);
+ for (const int64_t flushLatencyNs : pair.second.flushLatencyNs) {
+ proto->write(FIELD_TYPE_INT64 | FIELD_ID_RESTRICTED_STATS_FLUSH_LATENCY |
+ FIELD_COUNT_REPEATED,
+ flushLatencyNs);
+ }
+ writeNonZeroStatToStream(
+ FIELD_TYPE_INT64 | FIELD_ID_RESTRICTED_STATS_CATEGORY_CHANGED_COUNT,
+ (long long)pair.second.categoryChangedCount, proto);
+ proto->end(token);
+ }
+ proto->write(FIELD_TYPE_BOOL | FIELD_ID_CONFIG_STATS_DEVICE_INFO_TABLE_CREATION_FAILED,
+ configStats.device_info_table_creation_failed);
+ proto->write(FIELD_TYPE_INT32 | FIELD_ID_CONFIG_STATS_RESTRICTED_DB_CORRUPTED_COUNT,
+ configStats.db_corrupted_count);
+ for (int64_t latency : configStats.total_flush_latency_ns) {
+ proto->write(FIELD_TYPE_INT64 | FIELD_ID_CONFIG_STATS_RESTRICTED_CONFIG_FLUSH_LATENCY |
+ FIELD_COUNT_REPEATED,
+ latency);
+ }
+ for (int64_t dbSizeTimestamp : configStats.total_db_size_timestamps) {
+ proto->write(FIELD_TYPE_INT64 | FIELD_ID_CONFIG_STATS_RESTRICTED_CONFIG_DB_SIZE_TIME_SEC |
+ FIELD_COUNT_REPEATED,
+ dbSizeTimestamp);
+ }
+ for (int64_t dbSize : configStats.total_db_sizes) {
+ proto->write(FIELD_TYPE_INT64 | FIELD_ID_CONFIG_STATS_RESTRICTED_CONFIG_DB_SIZE_BYTES |
+ FIELD_COUNT_REPEATED,
+ dbSize);
+ }
proto->end(token);
}
@@ -984,15 +1362,19 @@
const size_t atomCounts = mPushedAtomStats.size();
for (size_t i = 2; i < atomCounts; i++) {
- if (mPushedAtomStats[i] > 0) {
+ if (mPushedAtomStats[i].logCount > 0) {
uint64_t token =
proto.start(FIELD_TYPE_MESSAGE | FIELD_ID_ATOM_STATS | FIELD_COUNT_REPEATED);
proto.write(FIELD_TYPE_INT32 | FIELD_ID_ATOM_STATS_TAG, (int32_t)i);
- proto.write(FIELD_TYPE_INT32 | FIELD_ID_ATOM_STATS_COUNT, mPushedAtomStats[i]);
- int errors = getPushedAtomErrors(i);
- if (errors > 0) {
- proto.write(FIELD_TYPE_INT32 | FIELD_ID_ATOM_STATS_ERROR_COUNT, errors);
- }
+ proto.write(FIELD_TYPE_INT32 | FIELD_ID_ATOM_STATS_COUNT, mPushedAtomStats[i].logCount);
+ const int errors = getPushedAtomErrorsLocked(i);
+ writeNonZeroStatToStream(FIELD_TYPE_INT32 | FIELD_ID_ATOM_STATS_ERROR_COUNT, errors,
+ &proto);
+ const int drops = getPushedAtomDropsLocked(i);
+ writeNonZeroStatToStream(FIELD_TYPE_INT32 | FIELD_ID_ATOM_STATS_DROPS_COUNT, drops,
+ &proto);
+ writeNonZeroStatToStream(FIELD_TYPE_INT32 | FIELD_ID_ATOM_STATS_SKIP_COUNT,
+ mPushedAtomStats[i].skipCount, &proto);
proto.end(token);
}
}
@@ -1001,20 +1383,23 @@
uint64_t token =
proto.start(FIELD_TYPE_MESSAGE | FIELD_ID_ATOM_STATS | FIELD_COUNT_REPEATED);
proto.write(FIELD_TYPE_INT32 | FIELD_ID_ATOM_STATS_TAG, pair.first);
- proto.write(FIELD_TYPE_INT32 | FIELD_ID_ATOM_STATS_COUNT, pair.second);
- int errors = getPushedAtomErrors(pair.first);
- if (errors > 0) {
- proto.write(FIELD_TYPE_INT32 | FIELD_ID_ATOM_STATS_ERROR_COUNT, errors);
- }
+ proto.write(FIELD_TYPE_INT32 | FIELD_ID_ATOM_STATS_COUNT, pair.second.logCount);
+ const int errors = getPushedAtomErrorsLocked(pair.first);
+ writeNonZeroStatToStream(FIELD_TYPE_INT32 | FIELD_ID_ATOM_STATS_ERROR_COUNT, errors,
+ &proto);
+ const int drops = getPushedAtomDropsLocked(pair.first);
+ writeNonZeroStatToStream(FIELD_TYPE_INT32 | FIELD_ID_ATOM_STATS_DROPS_COUNT, drops, &proto);
+ writeNonZeroStatToStream(FIELD_TYPE_INT32 | FIELD_ID_ATOM_STATS_SKIP_COUNT,
+ pair.second.skipCount, &proto);
proto.end(token);
}
for (const auto& pair : mPulledAtomStats) {
- android::os::statsd::writePullerStatsToStream(pair, &proto);
+ writePullerStatsToStream(pair, &proto);
}
for (const auto& pair : mAtomMetricStats) {
- android::os::statsd::writeAtomMetricStatsToStream(pair, &proto);
+ writeAtomMetricStatsToStream(pair, &proto);
}
if (mAnomalyAlarmRegisteredStats > 0) {
@@ -1079,6 +1464,42 @@
proto.end(token);
}
+ for (const auto& stat : mRestrictedMetricQueryStats) {
+ uint64_t token = proto.start(FIELD_TYPE_MESSAGE | FIELD_ID_RESTRICTED_METRIC_QUERY_STATS |
+ FIELD_COUNT_REPEATED);
+ proto.write(FIELD_TYPE_INT32 | FIELD_ID_RESTRICTED_METRIC_QUERY_STATS_CALLING_UID,
+ stat.mCallingUid);
+ proto.write(FIELD_TYPE_INT64 | FIELD_ID_RESTRICTED_METRIC_QUERY_STATS_CONFIG_ID,
+ stat.mConfigId);
+ proto.write(FIELD_TYPE_STRING | FIELD_ID_RESTRICTED_METRIC_QUERY_STATS_CONFIG_PACKAGE,
+ stat.mConfigPackage);
+ if (stat.mConfigUid.has_value()) {
+ proto.write(FIELD_TYPE_INT32 | FIELD_ID_RESTRICTED_METRIC_QUERY_STATS_CONFIG_UID,
+ stat.mConfigUid.value());
+ }
+ if (stat.mInvalidQueryReason.has_value()) {
+ proto.write(
+ FIELD_TYPE_ENUM | FIELD_ID_RESTRICTED_METRIC_QUERY_STATS_INVALID_QUERY_REASON,
+ stat.mInvalidQueryReason.value());
+ }
+ proto.write(FIELD_TYPE_INT64 | FIELD_ID_RESTRICTED_METRIC_QUERY_STATS_QUERY_WALL_TIME_NS,
+ stat.mQueryWallTimeNs);
+ proto.write(FIELD_TYPE_BOOL | FIELD_ID_RESTRICTED_METRIC_QUERY_STATS_HAS_ERROR,
+ stat.mHasError);
+ if (stat.mHasError && !stat.mError.empty()) {
+ proto.write(FIELD_TYPE_STRING | FIELD_ID_RESTRICTED_METRIC_QUERY_STATS_ERROR,
+ stat.mError);
+ }
+ if (stat.mQueryLatencyNs.has_value()) {
+ proto.write(FIELD_TYPE_INT64 | FIELD_ID_RESTRICTED_METRIC_QUERY_STATS_LATENCY_NS,
+ stat.mQueryLatencyNs.value());
+ }
+ proto.end(token);
+ }
+
+ proto.write(FIELD_TYPE_UINT32 | FIELD_ID_SHARD_OFFSET,
+ static_cast<long>(ShardOffsetProvider::getInstance().getShardOffset()));
+
output->clear();
size_t bufferSize = proto.size();
output->resize(bufferSize);
@@ -1106,6 +1527,89 @@
kDimensionKeySizeHardLimit);
}
+InvalidConfigReason createInvalidConfigReasonWithMatcher(const InvalidConfigReasonEnum reason,
+ const int64_t matcherId) {
+ InvalidConfigReason invalidConfigReason(reason);
+ invalidConfigReason.matcherIds.push_back(matcherId);
+ return invalidConfigReason;
+}
+
+InvalidConfigReason createInvalidConfigReasonWithMatcher(const InvalidConfigReasonEnum reason,
+ const int64_t metricId,
+ const int64_t matcherId) {
+ InvalidConfigReason invalidConfigReason(reason, metricId);
+ invalidConfigReason.matcherIds.push_back(matcherId);
+ return invalidConfigReason;
+}
+
+InvalidConfigReason createInvalidConfigReasonWithPredicate(const InvalidConfigReasonEnum reason,
+ const int64_t conditionId) {
+ InvalidConfigReason invalidConfigReason(reason);
+ invalidConfigReason.conditionIds.push_back(conditionId);
+ return invalidConfigReason;
+}
+
+InvalidConfigReason createInvalidConfigReasonWithPredicate(const InvalidConfigReasonEnum reason,
+ const int64_t metricId,
+ const int64_t conditionId) {
+ InvalidConfigReason invalidConfigReason(reason, metricId);
+ invalidConfigReason.conditionIds.push_back(conditionId);
+ return invalidConfigReason;
+}
+
+InvalidConfigReason createInvalidConfigReasonWithState(const InvalidConfigReasonEnum reason,
+ const int64_t metricId,
+ const int64_t stateId) {
+ InvalidConfigReason invalidConfigReason(reason, metricId);
+ invalidConfigReason.stateId = stateId;
+ return invalidConfigReason;
+}
+
+InvalidConfigReason createInvalidConfigReasonWithAlert(const InvalidConfigReasonEnum reason,
+ const int64_t alertId) {
+ InvalidConfigReason invalidConfigReason(reason);
+ invalidConfigReason.alertId = alertId;
+ return invalidConfigReason;
+}
+
+InvalidConfigReason createInvalidConfigReasonWithAlert(const InvalidConfigReasonEnum reason,
+ const int64_t metricId,
+ const int64_t alertId) {
+ InvalidConfigReason invalidConfigReason(reason, metricId);
+ invalidConfigReason.alertId = alertId;
+ return invalidConfigReason;
+}
+
+InvalidConfigReason createInvalidConfigReasonWithAlarm(const InvalidConfigReasonEnum reason,
+ const int64_t alarmId) {
+ InvalidConfigReason invalidConfigReason(reason);
+ invalidConfigReason.alarmId = alarmId;
+ return invalidConfigReason;
+}
+
+InvalidConfigReason createInvalidConfigReasonWithSubscription(const InvalidConfigReasonEnum reason,
+ const int64_t subscriptionId) {
+ InvalidConfigReason invalidConfigReason(reason);
+ invalidConfigReason.subscriptionId = subscriptionId;
+ return invalidConfigReason;
+}
+
+InvalidConfigReason createInvalidConfigReasonWithSubscriptionAndAlarm(
+ const InvalidConfigReasonEnum reason, const int64_t subscriptionId, const int64_t alarmId) {
+ InvalidConfigReason invalidConfigReason(reason);
+ invalidConfigReason.subscriptionId = subscriptionId;
+ invalidConfigReason.alarmId = alarmId;
+ return invalidConfigReason;
+}
+
+InvalidConfigReason createInvalidConfigReasonWithSubscriptionAndAlert(
+ const InvalidConfigReasonEnum reason, const int64_t subscriptionId, const int64_t alertId) {
+ InvalidConfigReason invalidConfigReason(reason);
+ invalidConfigReason.subscriptionId = subscriptionId;
+ invalidConfigReason.alertId = alertId;
+ return invalidConfigReason;
+}
+
} // namespace statsd
} // namespace os
} // namespace android
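
The drop counters added above (noteAtomDroppedLocked and the non-platform path in noteAtomLoggedLocked) follow the usual statsd guardrail for maps keyed by client-supplied ids: a new key is inserted only while the map is below its cap, but keys already tracked keep counting. A standalone sketch of that idiom, with hypothetical names:

    #include <unordered_map>

    // Count occurrences per id without letting the map grow past maxEntries.
    void countBounded(std::unordered_map<int, int>& counts, int id, size_t maxEntries) {
        if (counts.size() < maxEntries || counts.find(id) != counts.end()) {
            counts[id]++;  // ids already present keep counting even when the map is full
        }
        // Otherwise the occurrence is intentionally not tracked, bounding memory use.
    }
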
diff --git a/statsd/src/guardrail/StatsdStats.h b/statsd/src/guardrail/StatsdStats.h
index d812e55..4bf6ffd 100644
--- a/statsd/src/guardrail/StatsdStats.h
+++ b/statsd/src/guardrail/StatsdStats.h
@@ -15,20 +15,64 @@
*/
#pragma once
-#include "config/ConfigKey.h"
-
#include <gtest/gtest_prod.h>
#include <log/log_time.h>
+#include <src/guardrail/invalid_config_reason_enum.pb.h>
+
#include <list>
#include <mutex>
#include <string>
-#include <vector>
#include <unordered_map>
+#include <vector>
+
+#include "config/ConfigKey.h"
namespace android {
namespace os {
namespace statsd {
+struct InvalidConfigReason {
+ InvalidConfigReasonEnum reason;
+ std::optional<int64_t> metricId;
+ std::optional<int64_t> stateId;
+ std::optional<int64_t> alertId;
+ std::optional<int64_t> alarmId;
+ std::optional<int64_t> subscriptionId;
+ std::vector<int64_t> matcherIds;
+ std::vector<int64_t> conditionIds;
+ InvalidConfigReason(){};
+ InvalidConfigReason(InvalidConfigReasonEnum reason) : reason(reason){};
+ InvalidConfigReason(InvalidConfigReasonEnum reason, int64_t metricId)
+ : reason(reason), metricId(metricId){};
+ bool operator==(const InvalidConfigReason& other) const {
+ return (this->reason == other.reason) && (this->metricId == other.metricId) &&
+ (this->stateId == other.stateId) && (this->alertId == other.alertId) &&
+ (this->alarmId == other.alarmId) && (this->subscriptionId == other.subscriptionId) &&
+ (this->matcherIds == other.matcherIds) && (this->conditionIds == other.conditionIds);
+ }
+};
+
+// Keep this in sync with InvalidQueryReason enum in stats_log.proto
+enum InvalidQueryReason {
+ UNKNOWN_REASON = 0,
+ FLAG_DISABLED = 1,
+ UNSUPPORTED_SQLITE_VERSION = 2,
+ AMBIGUOUS_CONFIG_KEY = 3,
+ CONFIG_KEY_NOT_FOUND = 4,
+ CONFIG_KEY_WITH_UNMATCHED_DELEGATE = 5,
+ QUERY_FAILURE = 6,
+ INCONSISTENT_ROW_SIZE = 7,
+ NULL_CALLBACK = 8
+};
+
+typedef struct {
+ int64_t insertError = 0;
+ int64_t tableCreationError = 0;
+ int64_t tableDeletionError = 0;
+ std::list<int64_t> flushLatencyNs;
+ int64_t categoryChangedCount = 0;
+} RestrictedMetricStats;
+
struct ConfigStats {
int32_t uid;
int64_t id;
@@ -40,6 +84,11 @@
int32_t matcher_count;
int32_t alert_count;
bool is_valid;
+ bool device_info_table_creation_failed = false;
+ int32_t db_corrupted_count = 0;
+
+    // Stores the reason why the config is invalid, if it is not valid.
+ std::optional<InvalidConfigReason> reason;
std::list<int32_t> broadcast_sent_time_sec;
@@ -79,12 +128,23 @@
// Stores the config ID for each sub-config used.
std::list<std::pair<const int64_t, const int32_t>> annotations;
+
+ // Maps metric ID of restricted metric to its stats.
+ std::map<int64_t, RestrictedMetricStats> restricted_metric_stats;
+
+ std::list<int64_t> total_flush_latency_ns;
+
+ // Stores the last 20 timestamps for computing sqlite db size.
+ std::list<int64_t> total_db_size_timestamps;
+
+ // Stores the last 20 sizes of the sqlite db.
+ std::list<int64_t> total_db_sizes;
};
struct UidMapStats {
- int32_t changes;
- int32_t bytes_used;
- int32_t dropped_changes;
+ int32_t changes = 0;
+ int32_t bytes_used = 0;
+ int32_t dropped_changes = 0;
int32_t deleted_apps = 0;
};
@@ -120,6 +180,14 @@
const static int kMaxPullAtomPackages = 100;
+ const static int kMaxRestrictedMetricQueryCount = 20;
+
+ const static int kMaxRestrictedMetricFlushLatencyCount = 20;
+
+ const static int kMaxRestrictedConfigFlushLatencyCount = 20;
+
+ const static int kMaxRestrictedConfigDbSizeCount = 20;
+
// Max memory allowed for storing metrics per configuration. If this limit is exceeded, statsd
// drops the metrics data in memory.
static const size_t kMaxMetricsBytesPerConfig = 2 * 1024 * 1024;
@@ -128,6 +196,10 @@
// data subscriber that it's time to call getData.
static const size_t kBytesPerConfigTriggerGetData = 192 * 1024;
+ // Soft memory limit per restricted configuration. Once this limit is exceeded,
+    // we begin flushing in-memory restricted metrics to the database.
+ static const size_t kBytesPerRestrictedConfigTriggerFlush = 25 * 1024;
+
// Cap the UID map's memory usage to this. This should be fairly high since the UID information
// is critical for understanding the metrics.
const static size_t kMaxBytesUsedUidMap = 50 * 1024;
@@ -141,6 +213,15 @@
/* Min period between two checks of byte size per config key in nanoseconds. */
static const int64_t kMinByteSizeCheckPeriodNs = 60 * NS_PER_SEC;
+ /* Min period between two checks of restricted metrics TTLs. */
+ static const int64_t kMinTtlCheckPeriodNs = 60 * 60 * NS_PER_SEC;
+
+ /* Min period between two flush operations of restricted metrics. */
+ static const int64_t kMinFlushRestrictedPeriodNs = 60 * 60 * NS_PER_SEC;
+
+ /* Min period between two db guardrail check operations of restricted metrics. */
+ static const int64_t kMinDbGuardrailEnforcementPeriodNs = 60 * 60 * NS_PER_SEC;
+
/* Minimum period between two activation broadcasts in nanoseconds. */
static const int64_t kMinActivationBroadcastPeriodNs = 10 * NS_PER_SEC;
@@ -165,9 +246,12 @@
// Maximum number of pushed atoms statsd stats will track above kMaxPushedAtomId.
static const int kMaxNonPlatformPushedAtoms = 600;
+ // Maximum number of pushed atoms error statsd stats will track.
+ static const int kMaxPushedAtomErrorStatsSize = 100;
+
// Maximum atom id value that we consider a platform pushed atom.
// This should be updated once highest pushed atom id in atoms.proto approaches this value.
- static const int kMaxPushedAtomId = 750;
+ static const int kMaxPushedAtomId = 900;
// Atom id that is the start of the pulled atoms.
static const int kPullAtomStartTag = 10000;
@@ -200,7 +284,7 @@
void noteConfigReceived(const ConfigKey& key, int metricsCount, int conditionsCount,
int matchersCount, int alertCount,
const std::list<std::pair<const int64_t, const int32_t>>& annotations,
- bool isValid);
+ const std::optional<InvalidConfigReason>& reason);
/**
* Report a config has been removed.
*/
@@ -235,6 +319,16 @@
void noteMetricsReportSent(const ConfigKey& key, const size_t num_bytes);
/**
+ * Report failure in creating the device info metadata table for restricted configs.
+ */
+ void noteDeviceInfoTableCreationFailed(const ConfigKey& key);
+
+ /**
+ * Report db corruption for restricted configs.
+ */
+ void noteDbCorrupted(const ConfigKey& key);
+
+ /**
* Report the size of output tuple of a condition.
*
* Note: only report when the condition has an output dimension, and the tuple
@@ -289,7 +383,7 @@
/**
* Report an atom event has been logged.
*/
- void noteAtomLogged(int atomId, int32_t timeSec);
+ void noteAtomLogged(int atomId, int32_t timeSec, bool isSkipped);
/**
* Report that statsd modified the anomaly alarm registered with StatsCompanionService.
@@ -459,25 +553,65 @@
*/
void noteBucketUnknownCondition(int64_t metricId);
- /* Reports one event has been dropped due to queue overflow, and the oldest event timestamp in
- * the queue */
- void noteEventQueueOverflow(int64_t oldestEventTimestampNs);
+    /* Reports that an event with the given atom id has been dropped due to queue overflow, along
+     * with the oldest event timestamp in the queue. */
+ void noteEventQueueOverflow(int64_t oldestEventTimestampNs, int32_t atomId, bool isSkipped);
/**
* Reports that the activation broadcast guardrail was hit for this uid. Namely, the broadcast
* should have been sent, but instead was skipped due to hitting the guardrail.
*/
- void noteActivationBroadcastGuardrailHit(const int uid);
+ void noteActivationBroadcastGuardrailHit(const int uid);
- /**
- * Reports that an atom is erroneous or cannot be parsed successfully by
- * statsd. An atom tag of 0 indicates that the client did not supply the
- * atom id within the encoding.
- *
- * For pushed atoms only, this call should be preceded by a call to
- * noteAtomLogged.
- */
- void noteAtomError(int atomTag, bool pull=false);
+ /**
+ * Reports that an atom is erroneous or cannot be parsed successfully by
+ * statsd. An atom tag of 0 indicates that the client did not supply the
+ * atom id within the encoding.
+ *
+ * For pushed atoms only, this call should be preceded by a call to
+ * noteAtomLogged.
+ */
+ void noteAtomError(int atomTag, bool pull = false);
+
+ /** Report query of restricted metric succeed **/
+ void noteQueryRestrictedMetricSucceed(const int64_t configId, const string& configPackage,
+ const std::optional<int32_t> configUid,
+ const int32_t callingUid, const int64_t queryLatencyNs);
+
+ /** Report query of restricted metric failed **/
+ void noteQueryRestrictedMetricFailed(const int64_t configId, const string& configPackage,
+ const std::optional<int32_t> configUid,
+ const int32_t callingUid, const InvalidQueryReason reason);
+
+ /** Report query of restricted metric failed along with an error string **/
+ void noteQueryRestrictedMetricFailed(const int64_t configId, const string& configPackage,
+ const std::optional<int32_t> configUid,
+ const int32_t callingUid, const InvalidQueryReason reason,
+ const string& error);
+
+ // Reports that a restricted metric fails to be inserted to database.
+ void noteRestrictedMetricInsertError(const ConfigKey& configKey, int64_t metricId);
+
+ // Reports that a restricted metric fails to create table in database.
+ void noteRestrictedMetricTableCreationError(const ConfigKey& configKey, const int64_t metricId);
+
+ // Reports that a restricted metric fails to delete table in database.
+ void noteRestrictedMetricTableDeletionError(const ConfigKey& configKey, const int64_t metricId);
+
+ // Reports the time it takes for a restricted metric to flush the data to the database.
+ void noteRestrictedMetricFlushLatency(const ConfigKey& configKey, const int64_t metricId,
+ const int64_t flushLatencyNs);
+
+ // Reports that a restricted metric had a category change.
+ void noteRestrictedMetricCategoryChanged(const ConfigKey& configKey, const int64_t metricId);
+
+    // Reports the time it takes to flush a restricted config to the database.
+ void noteRestrictedConfigFlushLatency(const ConfigKey& configKey,
+ const int64_t totalFlushLatencyNs);
+
+ // Reports the size of the internal sqlite db.
+ void noteRestrictedConfigDbSize(const ConfigKey& configKey, const int64_t elapsedTimeNs,
+ const int64_t dbSize);
/**
* Reset the historical stats. Including all stats in icebox, and the tracked stats about
@@ -506,9 +640,10 @@
typedef struct PullTimeoutMetadata {
int64_t pullTimeoutUptimeMillis;
int64_t pullTimeoutElapsedMillis;
- PullTimeoutMetadata(int64_t uptimeMillis, int64_t elapsedMillis) :
- pullTimeoutUptimeMillis(uptimeMillis),
- pullTimeoutElapsedMillis(elapsedMillis) {/* do nothing */}
+ PullTimeoutMetadata(int64_t uptimeMillis, int64_t elapsedMillis)
+ : pullTimeoutUptimeMillis(uptimeMillis),
+ pullTimeoutElapsedMillis(elapsedMillis) { /* do nothing */
+ }
} PullTimeoutMetadata;
typedef struct {
@@ -567,24 +702,33 @@
// The size of the vector is capped by kMaxIceBoxSize.
std::list<const std::shared_ptr<ConfigStats>> mIceBox;
- // Stores the number of times a pushed atom is logged.
+    // Stores the number of times a pushed atom is logged and how many of those were skipped.
// The size of the vector is the largest pushed atom id in atoms.proto + 1. Atoms
// out of that range will be put in mNonPlatformPushedAtomStats.
// This is a vector, not a map because it will be accessed A LOT -- for each stats log.
- std::vector<int> mPushedAtomStats;
+ struct PushedAtomStats {
+ int logCount = 0;
+ int skipCount = 0;
+ };
- // Stores the number of times a pushed atom is logged for atom ids above kMaxPushedAtomId.
- // The max size of the map is kMaxNonPlatformPushedAtoms.
- std::unordered_map<int, int> mNonPlatformPushedAtomStats;
+ std::vector<PushedAtomStats> mPushedAtomStats;
+
+ // Stores the number of times a pushed atom is logged and skipped for atom ids above
+ // kMaxPushedAtomId. The max size of the map is kMaxNonPlatformPushedAtoms.
+ std::unordered_map<int, PushedAtomStats> mNonPlatformPushedAtomStats;
+
+    // Stores the number of times a pushed atom is dropped due to a queue overflow event.
+    // We do not expect this to happen often, so a map is preferable to a pre-allocated vector.
+ // The max size of the map is kMaxPushedAtomId + kMaxNonPlatformPushedAtoms.
+ std::unordered_map<int, int> mPushedAtomDropsStats;
// Maps PullAtomId to its stats. The size is capped by the puller atom counts.
std::map<int, PulledAtomStats> mPulledAtomStats;
// Stores the number of times a pushed atom was logged erroneously. The
// corresponding counts for pulled atoms are stored in PulledAtomStats.
- // The max size of this map is kMaxAtomErrorsStatsSize.
+ // The max size of this map is kMaxPushedAtomErrorStatsSize.
std::map<int, int> mPushedAtomErrorStats;
- int kMaxPushedAtomErrorStatsSize = 100;
// Maps metric ID to its stats. The size is capped by the number of metrics.
std::map<int64_t, AtomMetricStats> mAtomMetricStats;
@@ -628,6 +772,40 @@
std::list<int32_t> mSystemServerRestartSec;
+ struct RestrictedMetricQueryStats {
+ RestrictedMetricQueryStats(int32_t callingUid, int64_t configId,
+ const string& configPackage, std::optional<int32_t> configUid,
+ int64_t queryTimeNs,
+ std::optional<InvalidQueryReason> invalidQueryReason,
+ const string& error, std::optional<int64_t> queryLatencyNs)
+ : mCallingUid(callingUid),
+ mConfigId(configId),
+ mConfigPackage(configPackage),
+ mConfigUid(configUid),
+ mQueryWallTimeNs(queryTimeNs),
+ mInvalidQueryReason(invalidQueryReason),
+ mError(error),
+ mQueryLatencyNs(queryLatencyNs) {
+ mHasError = invalidQueryReason.has_value();
+ }
+ int32_t mCallingUid;
+ int64_t mConfigId;
+ string mConfigPackage;
+ std::optional<int32_t> mConfigUid;
+ int64_t mQueryWallTimeNs;
+ std::optional<InvalidQueryReason> mInvalidQueryReason;
+ bool mHasError;
+ string mError;
+ std::optional<int64_t> mQueryLatencyNs;
+ };
+ std::list<RestrictedMetricQueryStats> mRestrictedMetricQueryStats;
+
+ void noteQueryRestrictedMetricFailedLocked(const int64_t configId, const string& configPackage,
+ const std::optional<int32_t> configUid,
+ const int32_t callingUid,
+ const InvalidQueryReason reason,
+ const string& error);
+
// Stores the number of times statsd modified the anomaly alarm registered with
// StatsCompanionService.
int mAnomalyAlarmRegisteredStats = 0;
@@ -641,6 +819,10 @@
void resetInternalLocked();
+ void noteAtomLoggedLocked(int atomId, bool isSkipped);
+
+ void noteAtomDroppedLocked(int atomId);
+
void noteDataDropped(const ConfigKey& key, const size_t totalBytes, int32_t timeSec);
void noteMetricsReportSent(const ConfigKey& key, const size_t num_bytes, int32_t timeSec);
@@ -653,7 +835,9 @@
void addToIceBoxLocked(std::shared_ptr<ConfigStats>& stats);
- int getPushedAtomErrors(int atomId) const;
+ int getPushedAtomErrorsLocked(int atomId) const;
+
+ int getPushedAtomDropsLocked(int atomId) const;
/**
* Get a reference to AtomMetricStats for a metric. If none exists, create it. The reference
@@ -663,6 +847,8 @@
FRIEND_TEST(StatsdStatsTest, TestValidConfigAdd);
FRIEND_TEST(StatsdStatsTest, TestInvalidConfigAdd);
+ FRIEND_TEST(StatsdStatsTest, TestInvalidConfigMissingMetricId);
+ FRIEND_TEST(StatsdStatsTest, TestInvalidConfigOnlyMetricId);
FRIEND_TEST(StatsdStatsTest, TestConfigRemove);
FRIEND_TEST(StatsdStatsTest, TestSubStats);
FRIEND_TEST(StatsdStatsTest, TestAtomLog);
@@ -674,10 +860,54 @@
FRIEND_TEST(StatsdStatsTest, TestAtomMetricsStats);
FRIEND_TEST(StatsdStatsTest, TestActivationBroadcastGuardrailHit);
FRIEND_TEST(StatsdStatsTest, TestAtomErrorStats);
+ FRIEND_TEST(StatsdStatsTest, TestAtomSkippedStats);
+ FRIEND_TEST(StatsdStatsTest, TestRestrictedMetricsStats);
+ FRIEND_TEST(StatsdStatsTest, TestRestrictedMetricsQueryStats);
+ FRIEND_TEST(StatsdStatsTest, TestAtomDroppedStats);
+ FRIEND_TEST(StatsdStatsTest, TestAtomLoggedAndDroppedStats);
+ FRIEND_TEST(StatsdStatsTest, TestAtomLoggedAndDroppedAndSkippedStats);
+ FRIEND_TEST(StatsdStatsTest, TestShardOffsetProvider);
FRIEND_TEST(StatsLogProcessorTest, InvalidConfigRemoved);
};
+InvalidConfigReason createInvalidConfigReasonWithMatcher(const InvalidConfigReasonEnum reason,
+ const int64_t matcherId);
+
+InvalidConfigReason createInvalidConfigReasonWithMatcher(const InvalidConfigReasonEnum reason,
+ const int64_t metricId,
+ const int64_t matcherId);
+
+InvalidConfigReason createInvalidConfigReasonWithPredicate(const InvalidConfigReasonEnum reason,
+ const int64_t conditionId);
+
+InvalidConfigReason createInvalidConfigReasonWithPredicate(const InvalidConfigReasonEnum reason,
+ const int64_t metricId,
+ const int64_t conditionId);
+
+InvalidConfigReason createInvalidConfigReasonWithState(const InvalidConfigReasonEnum reason,
+ const int64_t metricId,
+ const int64_t stateId);
+
+InvalidConfigReason createInvalidConfigReasonWithAlert(const InvalidConfigReasonEnum reason,
+ const int64_t alertId);
+
+InvalidConfigReason createInvalidConfigReasonWithAlert(const InvalidConfigReasonEnum reason,
+ const int64_t metricId,
+ const int64_t alertId);
+
+InvalidConfigReason createInvalidConfigReasonWithAlarm(const InvalidConfigReasonEnum reason,
+ const int64_t alarmId);
+
+InvalidConfigReason createInvalidConfigReasonWithSubscription(const InvalidConfigReasonEnum reason,
+ const int64_t subscriptionId);
+
+InvalidConfigReason createInvalidConfigReasonWithSubscriptionAndAlarm(
+ const InvalidConfigReasonEnum reason, const int64_t subscriptionId, const int64_t alarmId);
+
+InvalidConfigReason createInvalidConfigReasonWithSubscriptionAndAlert(
+ const InvalidConfigReasonEnum reason, const int64_t subscriptionId, const int64_t alertId);
+
} // namespace statsd
} // namespace os
} // namespace android
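
For readers tracing the new InvalidConfigReason factory helpers declared above, the following is a minimal sketch of how such a helper could be implemented and consumed. It assumes InvalidConfigReason carries a reason enum plus a matcherIds list, which matches how the objects are used later in this change (e.g. invalidConfigReason->matcherIds.push_back(child)); the constructor shown is illustrative, not the real definition.

// Sketch only: assumes InvalidConfigReason(reason) exists and exposes matcherIds.
InvalidConfigReason createInvalidConfigReasonWithMatcher(const InvalidConfigReasonEnum reason,
                                                         const int64_t matcherId) {
    InvalidConfigReason invalidConfigReason(reason);
    invalidConfigReason.matcherIds.push_back(matcherId);
    return invalidConfigReason;
}

// A matcher init path could then report a failure as:
//     return createInvalidConfigReasonWithMatcher(INVALID_CONFIG_REASON_MATCHER_NO_OPERATION, mId);
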
diff --git a/statsd/src/guardrail/invalid_config_reason_enum.proto b/statsd/src/guardrail/invalid_config_reason_enum.proto
new file mode 100644
index 0000000..23a462d
--- /dev/null
+++ b/statsd/src/guardrail/invalid_config_reason_enum.proto
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto2";
+
+package android.os.statsd;
+
+option java_package = "com.android.os";
+option java_outer_classname = "InvalidConfigReason";
+
+enum InvalidConfigReasonEnum {
+ INVALID_CONFIG_REASON_UNKNOWN = 0;
+ INVALID_CONFIG_REASON_LOG_SOURCE_ALLOWLIST_EMPTY = 1;
+ INVALID_CONFIG_REASON_TOO_MANY_LOG_SOURCES = 2;
+ INVALID_CONFIG_REASON_DEFAULT_PULL_PACKAGES_NOT_IN_MAP = 3;
+ INVALID_CONFIG_REASON_TOO_MANY_SOURCES_IN_PULL_PACKAGES = 4;
+ INVALID_CONFIG_REASON_TOO_MANY_METRICS = 5;
+ INVALID_CONFIG_REASON_TOO_MANY_CONDITIONS = 6;
+ INVALID_CONFIG_REASON_TOO_MANY_MATCHERS = 7;
+ INVALID_CONFIG_REASON_TOO_MANY_ALERTS = 8;
+ INVALID_CONFIG_REASON_PACKAGE_CERT_HASH_SIZE_TOO_LARGE = 9;
+ INVALID_CONFIG_REASON_NO_REPORT_METRIC_NOT_FOUND = 10;
+ INVALID_CONFIG_REASON_METRIC_NOT_IN_PREV_CONFIG = 11;
+ INVALID_CONFIG_REASON_METRIC_UPDATE_STATUS_UNKNOWN = 12;
+ INVALID_CONFIG_REASON_METRIC_HAS_MULTIPLE_ACTIVATIONS = 13;
+ INVALID_CONFIG_REASON_METRIC_SLICED_STATE_ATOM_ALLOWED_FROM_ANY_UID = 14;
+ INVALID_CONFIG_REASON_METRIC_MISSING_ID_OR_WHAT = 15;
+ INVALID_CONFIG_REASON_METRIC_CONDITIONLINK_NO_CONDITION = 16;
+ INVALID_CONFIG_REASON_METRIC_STATELINK_NO_STATE = 17;
+ INVALID_CONFIG_REASON_METRIC_BAD_THRESHOLD = 18;
+ INVALID_CONFIG_REASON_METRIC_MATCHER_NOT_FOUND = 19;
+ INVALID_CONFIG_REASON_METRIC_MATCHER_MORE_THAN_ONE_ATOM = 20;
+ INVALID_CONFIG_REASON_METRIC_CONDITION_NOT_FOUND = 21;
+ INVALID_CONFIG_REASON_METRIC_CONDITION_LINK_NOT_FOUND = 22;
+ INVALID_CONFIG_REASON_METRIC_STATE_NOT_FOUND = 23;
+ INVALID_CONFIG_REASON_METRIC_STATELINKS_NOT_SUBSET_DIM_IN_WHAT = 24;
+ INVALID_CONFIG_REASON_METRIC_ACTIVATION_MATCHER_NOT_FOUND = 25;
+ INVALID_CONFIG_REASON_METRIC_DEACTIVATION_MATCHER_NOT_FOUND = 26;
+ INVALID_CONFIG_REASON_METRIC_ACTIVATION_MATCHER_NOT_FOUND_NEW = 27;
+ INVALID_CONFIG_REASON_METRIC_ACTIVATION_MATCHER_NOT_FOUND_EXISTING = 28;
+ INVALID_CONFIG_REASON_METRIC_ACTIVATION_NOT_FOUND_EXISTING = 29;
+ INVALID_CONFIG_REASON_METRIC_DEACTIVATION_MATCHER_NOT_FOUND_NEW = 30;
+ INVALID_CONFIG_REASON_METRIC_SERIALIZATION_FAILED = 31;
+ INVALID_CONFIG_REASON_METRIC_ACTIVATION_SERIALIZATION_FAILED = 32;
+ INVALID_CONFIG_REASON_DURATION_METRIC_WHAT_NOT_SIMPLE = 33;
+ INVALID_CONFIG_REASON_DURATION_METRIC_WHAT_NOT_FOUND = 34;
+ INVALID_CONFIG_REASON_DURATION_METRIC_MISSING_START = 35;
+ INVALID_CONFIG_REASON_DURATION_METRIC_PRODUCER_INVALID = 36;
+ INVALID_CONFIG_REASON_DURATION_METRIC_MAX_SPARSE_HAS_SLICE_BY_STATE = 37;
+ INVALID_CONFIG_REASON_VALUE_METRIC_MISSING_VALUE_FIELD = 38;
+ INVALID_CONFIG_REASON_VALUE_METRIC_VALUE_FIELD_HAS_POSITION_ALL = 39;
+ INVALID_CONFIG_REASON_VALUE_METRIC_HAS_INCORRECT_VALUE_FIELD = 40;
+ INVALID_CONFIG_REASON_KLL_METRIC_MISSING_KLL_FIELD = 41;
+ INVALID_CONFIG_REASON_KLL_METRIC_KLL_FIELD_HAS_POSITION_ALL = 42;
+ INVALID_CONFIG_REASON_KLL_METRIC_HAS_INCORRECT_KLL_FIELD = 43;
+ INVALID_CONFIG_REASON_GAUGE_METRIC_INCORRECT_FIELD_FILTER = 44;
+ INVALID_CONFIG_REASON_GAUGE_METRIC_TRIGGER_NO_PULL_ATOM = 45;
+ INVALID_CONFIG_REASON_GAUGE_METRIC_TRIGGER_NO_FIRST_N_SAMPLES = 46;
+ INVALID_CONFIG_REASON_GAUGE_METRIC_FIRST_N_SAMPLES_WITH_WRONG_EVENT = 47 [deprecated = true];
+ INVALID_CONFIG_REASON_MATCHER_NOT_IN_PREV_CONFIG = 48;
+ INVALID_CONFIG_REASON_MATCHER_UPDATE_STATUS_UNKNOWN = 49;
+ INVALID_CONFIG_REASON_MATCHER_DUPLICATE = 50;
+ INVALID_CONFIG_REASON_MATCHER_SERIALIZATION_FAILED = 51;
+ INVALID_CONFIG_REASON_MATCHER_MALFORMED_CONTENTS_CASE = 52;
+ INVALID_CONFIG_REASON_MATCHER_TRACKER_NOT_INITIALIZED = 53;
+ INVALID_CONFIG_REASON_MATCHER_NO_OPERATION = 54;
+ INVALID_CONFIG_REASON_MATCHER_NOT_OPERATION_IS_NOT_UNARY = 55;
+ INVALID_CONFIG_REASON_MATCHER_CYCLE = 56;
+ INVALID_CONFIG_REASON_MATCHER_CHILD_NOT_FOUND = 57;
+ INVALID_CONFIG_REASON_CONDITION_NOT_IN_PREV_CONFIG = 58;
+ INVALID_CONFIG_REASON_CONDITION_UPDATE_STATUS_UNKNOWN = 59;
+ INVALID_CONFIG_REASON_CONDITION_DUPLICATE = 60;
+ INVALID_CONFIG_REASON_CONDITION_SERIALIZATION_FAILED = 61;
+ INVALID_CONFIG_REASON_CONDITION_MALFORMED_CONTENTS_CASE = 62;
+ INVALID_CONFIG_REASON_CONDITION_TRACKER_NOT_INITIALIZED = 63;
+ INVALID_CONFIG_REASON_CONDITION_NO_OPERATION = 64;
+ INVALID_CONFIG_REASON_CONDITION_NOT_OPERATION_IS_NOT_UNARY = 65;
+ INVALID_CONFIG_REASON_CONDITION_CYCLE = 66;
+ INVALID_CONFIG_REASON_CONDITION_CHILD_NOT_FOUND = 67;
+ INVALID_CONFIG_REASON_STATE_SERIALIZATION_FAILED = 68;
+ INVALID_CONFIG_REASON_ALERT_METRIC_NOT_FOUND = 69;
+ INVALID_CONFIG_REASON_ALERT_THRESHOLD_MISSING = 70;
+ INVALID_CONFIG_REASON_ALERT_INVALID_TRIGGER_OR_NUM_BUCKETS = 71;
+ INVALID_CONFIG_REASON_ALERT_CANNOT_ADD_ANOMALY = 72;
+ INVALID_CONFIG_REASON_ALERT_NOT_IN_PREV_CONFIG = 73;
+ INVALID_CONFIG_REASON_ALERT_UPDATE_STATUS_UNKNOWN = 74;
+ INVALID_CONFIG_REASON_ALERT_SERIALIZATION_FAILED = 75;
+ INVALID_CONFIG_REASON_ALARM_OFFSET_LESS_THAN_OR_EQUAL_ZERO = 76;
+ INVALID_CONFIG_REASON_ALARM_PERIOD_LESS_THAN_OR_EQUAL_ZERO = 77;
+ INVALID_CONFIG_REASON_SUBSCRIPTION_SUBSCRIBER_INFO_MISSING = 78;
+ INVALID_CONFIG_REASON_SUBSCRIPTION_RULE_NOT_FOUND = 79;
+ INVALID_CONFIG_REASON_METRIC_DIMENSIONAL_SAMPLING_INFO_INCORRECT_SHARD_COUNT = 80;
+ INVALID_CONFIG_REASON_METRIC_DIMENSIONAL_SAMPLING_INFO_MISSING_SAMPLED_FIELD = 81;
+ INVALID_CONFIG_REASON_METRIC_SAMPLED_FIELD_INCORRECT_SIZE = 82;
+ INVALID_CONFIG_REASON_METRIC_SAMPLED_FIELDS_NOT_SUBSET_DIM_IN_WHAT = 83;
+ INVALID_CONFIG_REASON_RESTRICTED_METRIC_NOT_ENABLED = 84;
+ INVALID_CONFIG_REASON_RESTRICTED_METRIC_NOT_SUPPORTED = 85;
+};
diff --git a/statsd/src/logd/LogEvent.cpp b/statsd/src/logd/LogEvent.cpp
index 398ce3e..8cdd79c 100644
--- a/statsd/src/logd/LogEvent.cpp
+++ b/statsd/src/logd/LogEvent.cpp
@@ -15,14 +15,17 @@
*/
#define STATSD_DEBUG false // STOPSHIP if true
+#include "Log.h"
+
#include "logd/LogEvent.h"
#include <android-base/stringprintf.h>
+#include <android-modules-utils/sdk_level.h>
#include <android/binder_ibinder.h>
-#include <log/log.h>
#include <private/android_filesystem_config.h>
-#include "annotations.h"
+#include "flags/FlagProvider.h"
+#include "stats_annotations.h"
#include "stats_log_util.h"
#include "statslog_statsd.h"
@@ -35,12 +38,25 @@
using namespace android::util;
using android::base::StringPrintf;
+using android::modules::sdklevel::IsAtLeastU;
using android::util::ProtoOutputStream;
using std::string;
using std::vector;
+namespace {
+
+uint8_t getTypeId(uint8_t typeInfo) {
+ return typeInfo & 0x0F; // type id in lower 4 bits
+}
+
+uint8_t getNumAnnotations(uint8_t typeInfo) {
+ return (typeInfo >> 4) & 0x0F; // num annotations in upper 4 bits
+}
+
+} // namespace
+
LogEvent::LogEvent(int32_t uid, int32_t pid)
- : mLogdTimestampNs(time(nullptr)), mLogUid(uid), mLogPid(pid) {
+ : mLogdTimestampNs(getWallClockNs()), mLogUid(uid), mLogPid(pid) {
}
LogEvent::LogEvent(const string& trainName, int64_t trainVersionCode, bool requiresStaging,
@@ -271,6 +287,7 @@
// Allowed types: INT, repeated INT
if (numElements > mValues.size() || !checkPreviousValueType(INT) ||
annotationType != BOOL_TYPE) {
+ VLOG("Atom ID %d error while parseIsUidAnnotation()", mTagId);
mValid = false;
return;
}
@@ -287,6 +304,7 @@
void LogEvent::parseTruncateTimestampAnnotation(uint8_t annotationType) {
if (!mValues.empty() || annotationType != BOOL_TYPE) {
+ VLOG("Atom ID %d error while parseTruncateTimestampAnnotation()", mTagId);
mValid = false;
return;
}
@@ -299,6 +317,7 @@
std::optional<size_t> firstUidInChainIndex) {
// Allowed types: all types except for attribution chains and repeated fields.
if (mValues.empty() || annotationType != BOOL_TYPE || firstUidInChainIndex || numElements) {
+ VLOG("Atom ID %d error while parsePrimaryFieldAnnotation()", mTagId);
mValid = false;
return;
}
@@ -311,11 +330,13 @@
std::optional<size_t> firstUidInChainIndex) {
// Allowed types: attribution chains
if (mValues.empty() || annotationType != BOOL_TYPE || !firstUidInChainIndex) {
+ VLOG("Atom ID %d error while parsePrimaryFieldFirstUidAnnotation()", mTagId);
mValid = false;
return;
}
if (mValues.size() < firstUidInChainIndex.value() + 1) { // AttributionChain is empty.
+ VLOG("Atom ID %d error while parsePrimaryFieldFirstUidAnnotation()", mTagId);
mValid = false;
android_errorWriteLog(0x534e4554, "174485572");
return;
@@ -327,9 +348,10 @@
void LogEvent::parseExclusiveStateAnnotation(uint8_t annotationType,
std::optional<uint8_t> numElements) {
- // Allowed types: INT
+ // Allowed types: BOOL
if (mValues.empty() || annotationType != BOOL_TYPE || !checkPreviousValueType(INT) ||
numElements) {
+ VLOG("Atom ID %d error while parseExclusiveStateAnnotation()", mTagId);
mValid = false;
return;
}
@@ -344,6 +366,7 @@
// Allowed types: INT
if (mValues.empty() || annotationType != INT32_TYPE || !checkPreviousValueType(INT) ||
numElements) {
+ VLOG("Atom ID %d error while parseTriggerStateResetAnnotation()", mTagId);
mValid = false;
return;
}
@@ -353,9 +376,10 @@
void LogEvent::parseStateNestedAnnotation(uint8_t annotationType,
std::optional<uint8_t> numElements) {
- // Allowed types: INT
+ // Allowed types: BOOL
if (mValues.empty() || annotationType != BOOL_TYPE || !checkPreviousValueType(INT) ||
numElements) {
+ VLOG("Atom ID %d error while parseStateNestedAnnotation()", mTagId);
mValid = false;
return;
}
@@ -364,6 +388,38 @@
mValues[mValues.size() - 1].mAnnotations.setNested(nested);
}
+void LogEvent::parseRestrictionCategoryAnnotation(uint8_t annotationType) {
+ // Allowed types: INT, field value should be empty since this is an atom-level annotation.
+ if (!mValues.empty() || annotationType != INT32_TYPE) {
+ mValid = false;
+ return;
+ }
+ int value = readNextValue<int32_t>();
+ // should be one of predefined category in StatsLog.java
+ switch (value) {
+ // Only diagnostic is currently supported for use.
+ case ASTATSLOG_RESTRICTION_CATEGORY_DIAGNOSTIC:
+ break;
+ default:
+ mValid = false;
+ return;
+ }
+ mRestrictionCategory = static_cast<StatsdRestrictionCategory>(value);
+ return;
+}
+
+void LogEvent::parseFieldRestrictionAnnotation(uint8_t annotationType) {
+ // Allowed types: BOOL
+ if (mValues.empty() || annotationType != BOOL_TYPE) {
+ mValid = false;
+ return;
+ }
+ // Read the value so that the rest of the event is correctly parsed
+ // TODO: store the field annotations once the metrics need to parse them.
+ readNextValue<uint8_t>();
+ return;
+}
+
// firstUidInChainIndex is a default parameter that is only needed when parsing
// annotations for attribution chains.
// numElements is a default param that is only needed when parsing annotations for repeated fields
@@ -374,65 +430,123 @@
uint8_t annotationType = readNextValue<uint8_t>();
switch (annotationId) {
- case ANNOTATION_ID_IS_UID:
+ case ASTATSLOG_ANNOTATION_ID_IS_UID:
parseIsUidAnnotation(annotationType, numElements);
break;
- case ANNOTATION_ID_TRUNCATE_TIMESTAMP:
+ case ASTATSLOG_ANNOTATION_ID_TRUNCATE_TIMESTAMP:
parseTruncateTimestampAnnotation(annotationType);
break;
- case ANNOTATION_ID_PRIMARY_FIELD:
+ case ASTATSLOG_ANNOTATION_ID_PRIMARY_FIELD:
parsePrimaryFieldAnnotation(annotationType, numElements, firstUidInChainIndex);
break;
- case ANNOTATION_ID_PRIMARY_FIELD_FIRST_UID:
+ case ASTATSLOG_ANNOTATION_ID_PRIMARY_FIELD_FIRST_UID:
parsePrimaryFieldFirstUidAnnotation(annotationType, firstUidInChainIndex);
break;
- case ANNOTATION_ID_EXCLUSIVE_STATE:
+ case ASTATSLOG_ANNOTATION_ID_EXCLUSIVE_STATE:
parseExclusiveStateAnnotation(annotationType, numElements);
break;
- case ANNOTATION_ID_TRIGGER_STATE_RESET:
+ case ASTATSLOG_ANNOTATION_ID_TRIGGER_STATE_RESET:
parseTriggerStateResetAnnotation(annotationType, numElements);
break;
- case ANNOTATION_ID_STATE_NESTED:
+ case ASTATSLOG_ANNOTATION_ID_STATE_NESTED:
parseStateNestedAnnotation(annotationType, numElements);
break;
+ case ASTATSLOG_ANNOTATION_ID_RESTRICTION_CATEGORY:
+ if (IsAtLeastU()) {
+ parseRestrictionCategoryAnnotation(annotationType);
+ } else {
+ mValid = false;
+ }
+ break;
+ // Currently field restrictions are ignored, so we parse but do not store them.
+ case ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_PERIPHERAL_DEVICE_INFO:
+ case ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_APP_USAGE:
+ case ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_APP_ACTIVITY:
+ case ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_HEALTH_CONNECT:
+ case ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_ACCESSIBILITY:
+ case ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_SYSTEM_SEARCH:
+ case ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_USER_ENGAGEMENT:
+ case ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_AMBIENT_SENSING:
+ case ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_DEMOGRAPHIC_CLASSIFICATION:
+ if (IsAtLeastU()) {
+ parseFieldRestrictionAnnotation(annotationType);
+ } else {
+ mValid = false;
+ }
+ break;
default:
+ VLOG("Atom ID %d error while parseAnnotations() - wrong annotationId(%d)", mTagId,
+ annotationId);
mValid = false;
return;
}
}
}
-// This parsing logic is tied to the encoding scheme used in StatsEvent.java and
-// stats_event.c
-bool LogEvent::parseBuffer(uint8_t* buf, size_t len) {
+LogEvent::BodyBufferInfo LogEvent::parseHeader(const uint8_t* buf, size_t len) {
+ BodyBufferInfo bodyInfo;
+
+ mParsedHeaderOnly = true;
+
mBuf = buf;
mRemainingLen = (uint32_t)len;
- int32_t pos[] = {1, 1, 1};
- bool last[] = {false, false, false};
-
// Beginning of buffer is OBJECT_TYPE | NUM_FIELDS | TIMESTAMP | ATOM_ID
uint8_t typeInfo = readNextValue<uint8_t>();
- if (getTypeId(typeInfo) != OBJECT_TYPE) mValid = false;
+ if (getTypeId(typeInfo) != OBJECT_TYPE) {
+ mValid = false;
+ mBuf = nullptr;
+ return bodyInfo;
+ }
uint8_t numElements = readNextValue<uint8_t>();
- if (numElements < 2 || numElements > INT8_MAX) mValid = false;
+ if (numElements < 2 || numElements > INT8_MAX) {
+ mValid = false;
+ mBuf = nullptr;
+ return bodyInfo;
+ }
typeInfo = readNextValue<uint8_t>();
- if (getTypeId(typeInfo) != INT64_TYPE) mValid = false;
+ if (getTypeId(typeInfo) != INT64_TYPE) {
+ mValid = false;
+ mBuf = nullptr;
+ return bodyInfo;
+ }
mElapsedTimestampNs = readNextValue<int64_t>();
numElements--;
typeInfo = readNextValue<uint8_t>();
- if (getTypeId(typeInfo) != INT32_TYPE) mValid = false;
+ if (getTypeId(typeInfo) != INT32_TYPE) {
+ mValid = false;
+ mBuf = nullptr;
+ return bodyInfo;
+ }
mTagId = readNextValue<int32_t>();
numElements--;
+
parseAnnotations(getNumAnnotations(typeInfo)); // atom-level annotations
- for (pos[0] = 1; pos[0] <= numElements && mValid; pos[0]++) {
- last[0] = (pos[0] == numElements);
+ bodyInfo.numElements = numElements;
+ bodyInfo.buffer = mBuf;
+ bodyInfo.bufferSize = mRemainingLen;
- typeInfo = readNextValue<uint8_t>();
+ mBuf = nullptr;
+ return bodyInfo;
+}
+
+bool LogEvent::parseBody(const BodyBufferInfo& bodyInfo) {
+ mParsedHeaderOnly = false;
+
+ mBuf = bodyInfo.buffer;
+ mRemainingLen = (uint32_t)bodyInfo.bufferSize;
+
+ int32_t pos[] = {1, 1, 1};
+ bool last[] = {false, false, false};
+
+ for (pos[0] = 1; pos[0] <= bodyInfo.numElements && mValid; pos[0]++) {
+ last[0] = (pos[0] == bodyInfo.numElements);
+
+ uint8_t typeInfo = readNextValue<uint8_t>();
uint8_t typeId = getTypeId(typeInfo);
switch (typeId) {
@@ -464,7 +578,7 @@
parseArray(pos, /*depth=*/0, last, getNumAnnotations(typeInfo));
break;
case ERROR_TYPE:
- /* mErrorBitmask =*/ readNextValue<int32_t>();
+ /* mErrorBitmask =*/readNextValue<int32_t>();
mValid = false;
break;
default:
@@ -478,12 +592,22 @@
return mValid;
}
-uint8_t LogEvent::getTypeId(uint8_t typeInfo) {
- return typeInfo & 0x0F; // type id in lower 4 bytes
-}
+// This parsing logic is tied to the encoding scheme used in StatsEvent.java and
+// stats_event.c
+bool LogEvent::parseBuffer(const uint8_t* buf, size_t len) {
+ BodyBufferInfo bodyInfo = parseHeader(buf, len);
-uint8_t LogEvent::getNumAnnotations(uint8_t typeInfo) {
- return (typeInfo >> 4) & 0x0F; // num annotations in upper 4 bytes
+ // Emphasize the intention to parse the body; however, the atom data could be incomplete
+ // if header/body parsing failed, for example due to invalid buffer content.
+ mParsedHeaderOnly = false;
+
+ // early termination if header is invalid
+ if (!mValid) {
+ mBuf = nullptr;
+ return false;
+ }
+
+ return parseBody(bodyInfo);
}
int64_t LogEvent::GetLong(size_t key, status_t* err) const {
@@ -615,9 +739,25 @@
string result;
result += StringPrintf("{ uid(%d) %lld %lld (%d)", mLogUid, (long long)mLogdTimestampNs,
(long long)mElapsedTimestampNs, mTagId);
+ string annotations;
+ if (mTruncateTimestamp) {
+ annotations = "TRUNCATE_TS";
+ }
+ if (mResetState != -1) {
+ annotations += annotations.size() ? ", RESET_STATE" : "RESET_STATE";
+ }
+ if (annotations.size()) {
+ result += " [" + annotations + "] ";
+ }
+
+ if (isParsedHeaderOnly()) {
+ result += " ParsedHeaderOnly }";
+ return result;
+ }
+
for (const auto& value : mValues) {
- result +=
- StringPrintf("%#x", value.mField.getField()) + "->" + value.mValue.toString() + " ";
+ result += StringPrintf("%#x", value.mField.getField()) + "->" + value.mValue.toString();
+ result += value.mAnnotations.toString() + " ";
}
result += " }";
return result;
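
As a side note on the getTypeId()/getNumAnnotations() helpers moved into the anonymous namespace above: the socket protocol packs two values into each typeInfo byte, the field type in the low nibble and the annotation count in the high nibble. A standalone sketch of that decoding, using a made-up typeInfo value purely for illustration:

#include <cstdint>
#include <cstdio>

int main() {
    const uint8_t typeInfo = 0x23;                          // example byte, not real atom data
    const uint8_t typeId = typeInfo & 0x0F;                 // type id in lower 4 bits -> 3
    const uint8_t numAnnotations = (typeInfo >> 4) & 0x0F;  // annotation count in upper 4 bits -> 2
    std::printf("typeId=%d numAnnotations=%d\n", typeId, numAnnotations);
    return 0;
}
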
diff --git a/statsd/src/logd/LogEvent.h b/statsd/src/logd/LogEvent.h
index 88dd9b7..d75b446 100644
--- a/statsd/src/logd/LogEvent.h
+++ b/statsd/src/logd/LogEvent.h
@@ -24,6 +24,7 @@
#include <vector>
#include "FieldValue.h"
+#include "utils/RestrictedPolicyManager.h"
namespace android {
namespace os {
@@ -89,9 +90,30 @@
* should not include the android_log_header_t or the StatsEventTag)
* \param len size of the buffer
*
- * \return success of the initialization
+ * \return success of the parsing
*/
- bool parseBuffer(uint8_t* buf, size_t len);
+ bool parseBuffer(const uint8_t* buf, size_t len);
+
+ struct BodyBufferInfo {
+ const uint8_t* buffer = nullptr;
+ size_t bufferSize = 0;
+ uint8_t numElements = 0;
+ };
+
+ /**
+ * @brief Parses the atom header, which consists of the atom id, the timestamp
+ * and the atom-level annotations.
+ * Updates the value returned by isValid().
+ * @return BodyBufferInfo to be used for parseBody()
+ */
+ BodyBufferInfo parseHeader(const uint8_t* buf, size_t len);
+
+ /**
+ * @brief Parses the atom body, which consists of bodyInfo.numElements elements.
+ * Should be called with a BodyBufferInfo only when logEvent.isValid() == true.
+ * \return success of the parsing
+ */
+ bool parseBody(const BodyBufferInfo& bodyInfo);
// Constructs a BinaryPushStateChanged LogEvent from API call.
explicit LogEvent(const std::string& trainName, int64_t trainVersionCode, bool requiresStaging,
@@ -230,10 +252,25 @@
}
/**
+ * @brief Returns true if only the header was parsed
+ */
+ bool isParsedHeaderOnly() const {
+ return mParsedHeaderOnly;
+ }
+
+ /**
* Only use this if copy is absolutely needed.
*/
LogEvent(const LogEvent&) = default;
+ inline StatsdRestrictionCategory getRestrictionCategory() const {
+ return mRestrictionCategory;
+ }
+
+ inline bool isRestricted() const {
+ return mRestrictionCategory != CATEGORY_NO_RESTRICTION;
+ }
+
private:
void parseInt32(int32_t* pos, int32_t depth, bool* last, uint8_t numAnnotations);
void parseInt64(int32_t* pos, int32_t depth, bool* last, uint8_t numAnnotations);
@@ -257,18 +294,23 @@
void parseTriggerStateResetAnnotation(uint8_t annotationType,
std::optional<uint8_t> numElements);
void parseStateNestedAnnotation(uint8_t annotationType, std::optional<uint8_t> numElements);
+ void parseRestrictionCategoryAnnotation(uint8_t annotationType);
+ void parseFieldRestrictionAnnotation(uint8_t annotationType);
bool checkPreviousValueType(Type expected);
+ bool getRestrictedMetricsFlag();
/**
* The below two variables are only valid during the execution of
* parseBuffer. There are no guarantees about the state of these variables
* before/after.
*/
- uint8_t* mBuf;
+ const uint8_t* mBuf;
uint32_t mRemainingLen; // number of valid bytes left in the buffer being parsed
bool mValid = true; // stores whether the event we received from the socket is valid
+ bool mParsedHeaderOnly = false; // stores whether only the header was parsed, skipping the body
+
/**
* Side-effects:
* If there is enough space in buffer to read value of type T
@@ -309,9 +351,6 @@
mValues.push_back(FieldValue(f, v));
}
- uint8_t getTypeId(uint8_t typeInfo);
- uint8_t getNumAnnotations(uint8_t typeInfo);
-
// The items are naturally sorted in DFS order as we read them. this allows us to do fast
// matching.
std::vector<FieldValue> mValues;
@@ -335,6 +374,7 @@
// Annotations
bool mTruncateTimestamp = false;
int mResetState = -1;
+ StatsdRestrictionCategory mRestrictionCategory = CATEGORY_NO_RESTRICTION;
size_t mNumUidFields = 0;
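
The parseHeader()/parseBody() split documented in this header exists so a caller can decide from the header alone whether an atom is worth fully parsing. A minimal usage sketch, assuming the caller already holds a raw socket buffer (buf, len) and a set of interesting atom ids; interestedAtomIds is a hypothetical name, not the real listener field:

// Sketch only: buf/len and interestedAtomIds are caller-side placeholders.
LogEvent event(/*uid=*/1000, /*pid=*/1234);
const LogEvent::BodyBufferInfo bodyInfo = event.parseHeader(buf, len);
if (event.isValid() && interestedAtomIds.count(event.GetTagId()) > 0) {
    event.parseBody(bodyInfo);  // pay for full parsing only for atoms we care about
}
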
diff --git a/statsd/src/logd/LogEventQueue.h b/statsd/src/logd/LogEventQueue.h
index 9dda3d2..e0e2f4e 100644
--- a/statsd/src/logd/LogEventQueue.h
+++ b/statsd/src/logd/LogEventQueue.h
@@ -16,12 +16,14 @@
#pragma once
-#include "LogEvent.h"
+#include <gtest/gtest_prod.h>
#include <condition_variable>
#include <mutex>
#include <queue>
+#include "LogEvent.h"
+
namespace android {
namespace os {
namespace statsd {
@@ -50,6 +52,16 @@
std::condition_variable mCondition;
std::mutex mMutex;
std::queue<std::unique_ptr<LogEvent>> mQueue;
+
+ friend class SocketParseMessageTest;
+
+ FRIEND_TEST(SocketParseMessageTestNoFiltering, TestProcessMessageNoFiltering);
+ FRIEND_TEST(SocketParseMessageTestNoFiltering,
+ TestProcessMessageNoFilteringWithEmptySetExplicitSet);
+ FRIEND_TEST(SocketParseMessageTest, TestProcessMessageFilterEmptySet);
+ FRIEND_TEST(SocketParseMessageTest, TestProcessMessageFilterCompleteSet);
+ FRIEND_TEST(SocketParseMessageTest, TestProcessMessageFilterPartialSet);
+ FRIEND_TEST(SocketParseMessageTest, TestProcessMessageFilterToggle);
};
} // namespace statsd
diff --git a/statsd/src/main.cpp b/statsd/src/main.cpp
index bd2c0e4..14b31a0 100644
--- a/statsd/src/main.cpp
+++ b/statsd/src/main.cpp
@@ -17,19 +17,21 @@
#define STATSD_DEBUG false // STOPSHIP if true
#include "Log.h"
-#include "StatsService.h"
-#include "flags/FlagProvider.h"
-#include "socket/StatsSocketListener.h"
-
+#include <android/binder_ibinder.h>
+#include <android/binder_ibinder_platform.h>
#include <android/binder_interface_utils.h>
-#include <android/binder_process.h>
#include <android/binder_manager.h>
-#include <utils/Looper.h>
-
+#include <android/binder_process.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
+#include <utils/Looper.h>
+
+#include "StatsService.h"
+#include "flags/FlagProvider.h"
+#include "packages/UidMap.h"
+#include "socket/StatsSocketListener.h"
using namespace android;
using namespace android::os::statsd;
@@ -39,20 +41,12 @@
shared_ptr<StatsService> gStatsService = nullptr;
sp<StatsSocketListener> gSocketListener = nullptr;
+int gCtrlPipe[2];
void signalHandler(int sig) {
- if (sig == SIGPIPE) {
- // ShellSubscriber uses SIGPIPE as a signal to detect the end of the
- // client process. Don't prematurely exit(1) here. Instead, ignore the
- // signal and allow the write call to return EPIPE.
- ALOGI("statsd received SIGPIPE. Ignoring signal.");
- return;
- }
-
- if (gSocketListener != nullptr) gSocketListener->stopListener();
- if (gStatsService != nullptr) gStatsService->Terminate();
ALOGW("statsd terminated on receiving signal %d.", sig);
- exit(1);
+ const char c = 'q';
+ write(gCtrlPipe[1], &c, 1);
}
void registerSignalHandlers()
@@ -60,11 +54,15 @@
struct sigaction sa;
sigemptyset(&sa.sa_mask);
sa.sa_flags = 0;
- sa.sa_handler = signalHandler;
+
+ sa.sa_handler = SIG_IGN;
+ // ShellSubscriber uses SIGPIPE as a signal to detect the end of the
+ // client process. Don't prematurely exit(1) here. Instead, ignore the
+ // signal and allow the write call to return EPIPE.
sigaction(SIGPIPE, &sa, nullptr);
- sigaction(SIGHUP, &sa, nullptr);
- sigaction(SIGINT, &sa, nullptr);
- sigaction(SIGQUIT, &sa, nullptr);
+
+ pipe2(gCtrlPipe, O_CLOEXEC);
+ sa.sa_handler = signalHandler;
sigaction(SIGTERM, &sa, nullptr);
}
@@ -77,28 +75,46 @@
ABinderProcess_startThreadPool();
std::shared_ptr<LogEventQueue> eventQueue =
- std::make_shared<LogEventQueue>(4000 /*buffer limit. Buffer is NOT pre-allocated*/);
+ std::make_shared<LogEventQueue>(8000 /*buffer limit. Buffer is NOT pre-allocated*/);
// Initialize boot flags
- FlagProvider::getInstance().initBootFlags({});
+ FlagProvider::getInstance().initBootFlags(
+ {OPTIMIZATION_SOCKET_PARSING_FLAG, STATSD_INIT_COMPLETED_NO_DELAY_FLAG});
+ sp<UidMap> uidMap = UidMap::getInstance();
+
+ const bool logsFilteringEnabled = FlagProvider::getInstance().getBootFlagBool(
+ OPTIMIZATION_SOCKET_PARSING_FLAG, FLAG_FALSE);
+ std::shared_ptr<LogEventFilter> logEventFilter =
+ logsFilteringEnabled ? std::make_shared<LogEventFilter>() : nullptr;
+
+ const int initEventDelay = FlagProvider::getInstance().getBootFlagBool(
+ STATSD_INIT_COMPLETED_NO_DELAY_FLAG, FLAG_FALSE)
+ ? 0
+ : StatsService::kStatsdInitDelaySecs;
// Create the service
- gStatsService = SharedRefBase::make<StatsService>(looper, eventQueue);
+ gStatsService =
+ SharedRefBase::make<StatsService>(uidMap, eventQueue, logEventFilter, initEventDelay);
+ auto binder = gStatsService->asBinder();
+
+ // We want to be able to ask for the selinux context of callers:
+ if (__builtin_available(android __ANDROID_API_U__, *)) {
+ AIBinder_setRequestingSid(binder.get(), true);
+ }
+
// TODO(b/149582373): Set DUMP_FLAG_PROTO once libbinder_ndk supports
// setting dumpsys priorities.
- binder_status_t status = AServiceManager_addService(gStatsService->asBinder().get(), "stats");
+ binder_status_t status = AServiceManager_addService(binder.get(), "stats");
if (status != STATUS_OK) {
ALOGE("Failed to add service as AIDL service");
return -1;
}
- registerSignalHandlers();
-
gStatsService->sayHiToStatsCompanion();
gStatsService->Startup();
- gSocketListener = new StatsSocketListener(eventQueue);
+ gSocketListener = new StatsSocketListener(eventQueue, logEventFilter);
ALOGI("Statsd starts to listen to socket.");
// Backlog and /proc/sys/net/unix/max_dgram_qlen set to large value
@@ -106,6 +122,22 @@
exit(1);
}
+ // Use self-pipe to notify this thread to gracefully quit
+ // when receiving SIGTERM
+ registerSignalHandlers();
+ std::thread([] {
+ while (true) {
+ char c;
+ int i = read(gCtrlPipe[0], &c, 1);
+ if (i < 0) {
+ if (errno == EINTR) continue;
+ }
+ gSocketListener->stopListener();
+ gStatsService->Terminate();
+ exit(1);
+ }
+ }).detach();
+
// Loop forever -- the reports run on this thread in a handler, and the
// binder calls remain responsive in their pool of one thread.
while (true) {
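
The main.cpp rework above is an instance of the self-pipe trick: the SIGTERM handler only performs an async-signal-safe write() of one byte, and a dedicated thread reads it and runs the shutdown path outside signal context. A self-contained sketch of the same pattern (not the statsd code itself; the shutdown work is replaced by a print):

#include <cerrno>
#include <csignal>
#include <cstdio>
#include <cstdlib>
#include <thread>
#include <unistd.h>

static int gCtrlPipe[2];

static void onTerm(int /*sig*/) {
    const char c = 'q';            // async-signal-safe: just hand off to the watcher thread
    (void)write(gCtrlPipe[1], &c, 1);
}

int main() {
    pipe(gCtrlPipe);
    std::signal(SIGTERM, onTerm);
    std::thread([] {
        char c;
        while (read(gCtrlPipe[0], &c, 1) < 0 && errno == EINTR) {
            // retry if the read was interrupted
        }
        std::puts("graceful shutdown work would run here");
        std::exit(0);
    }).detach();
    pause();                       // stand-in for the real binder/looper event loop
    return 0;
}
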
diff --git a/statsd/src/matchers/AtomMatchingTracker.h b/statsd/src/matchers/AtomMatchingTracker.h
index cdb1716..3e35d9b 100644
--- a/statsd/src/matchers/AtomMatchingTracker.h
+++ b/statsd/src/matchers/AtomMatchingTracker.h
@@ -17,16 +17,17 @@
#ifndef ATOM_MATCHING_TRACKER_H
#define ATOM_MATCHING_TRACKER_H
-#include "src/statsd_config.pb.h"
-#include "logd/LogEvent.h"
-#include "matchers/matcher_util.h"
-
#include <utils/RefBase.h>
#include <set>
#include <unordered_map>
#include <vector>
+#include "guardrail/StatsdStats.h"
+#include "logd/LogEvent.h"
+#include "matchers/matcher_util.h"
+#include "src/statsd_config.pb.h"
+
namespace android {
namespace os {
namespace statsd {
@@ -47,17 +48,17 @@
// CombinationAtomMatchingTrackers using DFS.
// stack: a bit map to record which matcher has been visited on the stack. This is for detecting
// circle dependency.
- virtual bool init(const std::vector<AtomMatcher>& allAtomMatchers,
- const std::vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
- const std::unordered_map<int64_t, int>& matcherMap,
- std::vector<bool>& stack) = 0;
+ virtual optional<InvalidConfigReason> init(
+ const std::vector<AtomMatcher>& allAtomMatchers,
+ const std::vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
+ const std::unordered_map<int64_t, int>& matcherMap, std::vector<bool>& stack) = 0;
// Update appropriate state on config updates. Primarily, all indices need to be updated.
// This matcher and all of its children are guaranteed to be preserved across the update.
// matcher: the AtomMatcher proto from the config.
// index: the index of this matcher in mAllAtomMatchingTrackers.
// atomMatchingTrackerMap: map from matcher id to index in mAllAtomMatchingTrackers
- virtual bool onConfigUpdated(
+ virtual optional<InvalidConfigReason> onConfigUpdated(
const AtomMatcher& matcher, const int index,
const std::unordered_map<int64_t, int>& atomMatchingTrackerMap) = 0;
@@ -87,6 +88,10 @@
return mProtoHash;
}
+ bool isInitialized() {
+ return mInitialized;
+ }
+
protected:
// Name of this matching. We don't really need the name, but it makes log message easy to debug.
const int64_t mId;
@@ -106,8 +111,8 @@
// Used to determine if the definition of this matcher has changed across a config update.
const uint64_t mProtoHash;
- FRIEND_TEST(MetricsManagerTest, TestCreateAtomMatchingTrackerSimple);
- FRIEND_TEST(MetricsManagerTest, TestCreateAtomMatchingTrackerCombination);
+ FRIEND_TEST(MetricsManagerUtilTest, TestCreateAtomMatchingTrackerSimple);
+ FRIEND_TEST(MetricsManagerUtilTest, TestCreateAtomMatchingTrackerCombination);
FRIEND_TEST(ConfigUpdateTest, TestUpdateMatchers);
};
diff --git a/statsd/src/matchers/CombinationAtomMatchingTracker.cpp b/statsd/src/matchers/CombinationAtomMatchingTracker.cpp
index 45685ce..846a530 100644
--- a/statsd/src/matchers/CombinationAtomMatchingTracker.cpp
+++ b/statsd/src/matchers/CombinationAtomMatchingTracker.cpp
@@ -36,12 +36,12 @@
CombinationAtomMatchingTracker::~CombinationAtomMatchingTracker() {
}
-bool CombinationAtomMatchingTracker::init(
+optional<InvalidConfigReason> CombinationAtomMatchingTracker::init(
const vector<AtomMatcher>& allAtomMatchers,
const vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
const unordered_map<int64_t, int>& matcherMap, vector<bool>& stack) {
if (mInitialized) {
- return true;
+ return nullopt;
}
// mark this node as visited in the recursion stack.
@@ -51,20 +51,26 @@
// LogicalOperation is missing in the config
if (!matcher.has_operation()) {
- return false;
+ return createInvalidConfigReasonWithMatcher(INVALID_CONFIG_REASON_MATCHER_NO_OPERATION,
+ mId);
}
mLogicalOperation = matcher.operation();
if (mLogicalOperation == LogicalOperation::NOT && matcher.matcher_size() != 1) {
- return false;
+ return createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_MATCHER_NOT_OPERATION_IS_NOT_UNARY, mId);
}
for (const auto& child : matcher.matcher()) {
auto pair = matcherMap.find(child);
if (pair == matcherMap.end()) {
ALOGW("Matcher %lld not found in the config", (long long)child);
- return false;
+ optional<InvalidConfigReason> invalidConfigReason =
+ createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_MATCHER_CHILD_NOT_FOUND, mId);
+ invalidConfigReason->matcherIds.push_back(child);
+ return invalidConfigReason;
}
int childIndex = pair->second;
@@ -72,13 +78,18 @@
// if the child is a visited node in the recursion -> circle detected.
if (stack[childIndex]) {
ALOGE("Circle detected in matcher config");
- return false;
+ optional<InvalidConfigReason> invalidConfigReason =
+ createInvalidConfigReasonWithMatcher(INVALID_CONFIG_REASON_MATCHER_CYCLE, mId);
+ invalidConfigReason->matcherIds.push_back(child);
+ return invalidConfigReason;
}
-
- if (!allAtomMatchingTrackers[childIndex]->init(allAtomMatchers, allAtomMatchingTrackers,
- matcherMap, stack)) {
+ optional<InvalidConfigReason> invalidConfigReason =
+ allAtomMatchingTrackers[childIndex]->init(allAtomMatchers, allAtomMatchingTrackers,
+ matcherMap, stack);
+ if (invalidConfigReason.has_value()) {
ALOGW("child matcher init failed %lld", (long long)child);
- return false;
+ invalidConfigReason->matcherIds.push_back(mId);
+ return invalidConfigReason;
}
mChildren.push_back(childIndex);
@@ -90,10 +101,10 @@
mInitialized = true;
// unmark this node in the recursion stack.
stack[mIndex] = false;
- return true;
+ return nullopt;
}
-bool CombinationAtomMatchingTracker::onConfigUpdated(
+optional<InvalidConfigReason> CombinationAtomMatchingTracker::onConfigUpdated(
const AtomMatcher& matcher, const int index,
const unordered_map<int64_t, int>& atomMatchingTrackerMap) {
mIndex = index;
@@ -103,11 +114,15 @@
const auto& pair = atomMatchingTrackerMap.find(child);
if (pair == atomMatchingTrackerMap.end()) {
ALOGW("Matcher %lld not found in the config", (long long)child);
- return false;
+ optional<InvalidConfigReason> invalidConfigReason =
+ createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_MATCHER_CHILD_NOT_FOUND, matcher.id());
+ invalidConfigReason->matcherIds.push_back(child);
+ return invalidConfigReason;
}
mChildren.push_back(pair->second);
}
- return true;
+ return nullopt;
}
void CombinationAtomMatchingTracker::onLogEvent(
diff --git a/statsd/src/matchers/CombinationAtomMatchingTracker.h b/statsd/src/matchers/CombinationAtomMatchingTracker.h
index cf02086..edcbb49 100644
--- a/statsd/src/matchers/CombinationAtomMatchingTracker.h
+++ b/statsd/src/matchers/CombinationAtomMatchingTracker.h
@@ -31,12 +31,14 @@
public:
CombinationAtomMatchingTracker(const int64_t& id, const int index, const uint64_t protoHash);
- bool init(const std::vector<AtomMatcher>& allAtomMatchers,
- const std::vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
- const std::unordered_map<int64_t, int>& matcherMap, std::vector<bool>& stack);
+ optional<InvalidConfigReason> init(
+ const std::vector<AtomMatcher>& allAtomMatchers,
+ const std::vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
+ const std::unordered_map<int64_t, int>& matcherMap, std::vector<bool>& stack);
- bool onConfigUpdated(const AtomMatcher& matcher, const int index,
- const std::unordered_map<int64_t, int>& atomMatchingTrackerMap) override;
+ optional<InvalidConfigReason> onConfigUpdated(
+ const AtomMatcher& matcher, const int index,
+ const std::unordered_map<int64_t, int>& atomMatchingTrackerMap) override;
~CombinationAtomMatchingTracker();
diff --git a/statsd/src/matchers/SimpleAtomMatchingTracker.cpp b/statsd/src/matchers/SimpleAtomMatchingTracker.cpp
index 76bd476..a0e30b2 100644
--- a/statsd/src/matchers/SimpleAtomMatchingTracker.cpp
+++ b/statsd/src/matchers/SimpleAtomMatchingTracker.cpp
@@ -42,20 +42,28 @@
SimpleAtomMatchingTracker::~SimpleAtomMatchingTracker() {
}
-bool SimpleAtomMatchingTracker::init(const vector<AtomMatcher>& allAtomMatchers,
- const vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
- const unordered_map<int64_t, int>& matcherMap,
- vector<bool>& stack) {
+optional<InvalidConfigReason> SimpleAtomMatchingTracker::init(
+ const vector<AtomMatcher>& allAtomMatchers,
+ const vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
+ const unordered_map<int64_t, int>& matcherMap, vector<bool>& stack) {
// no need to do anything.
- return mInitialized;
+ if (!mInitialized) {
+ return createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_MATCHER_TRACKER_NOT_INITIALIZED, mId);
+ }
+ return nullopt;
}
-bool SimpleAtomMatchingTracker::onConfigUpdated(
+optional<InvalidConfigReason> SimpleAtomMatchingTracker::onConfigUpdated(
const AtomMatcher& matcher, const int index,
const unordered_map<int64_t, int>& atomMatchingTrackerMap) {
mIndex = index;
// Do not need to update mMatcher since the matcher must be identical across the update.
- return mInitialized;
+ if (!mInitialized) {
+ return createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_MATCHER_TRACKER_NOT_INITIALIZED, mId);
+ }
+ return nullopt;
}
void SimpleAtomMatchingTracker::onLogEvent(
diff --git a/statsd/src/matchers/SimpleAtomMatchingTracker.h b/statsd/src/matchers/SimpleAtomMatchingTracker.h
index 13ef344..70585f9 100644
--- a/statsd/src/matchers/SimpleAtomMatchingTracker.h
+++ b/statsd/src/matchers/SimpleAtomMatchingTracker.h
@@ -35,13 +35,14 @@
~SimpleAtomMatchingTracker();
- bool init(const std::vector<AtomMatcher>& allAtomMatchers,
- const std::vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
- const std::unordered_map<int64_t, int>& matcherMap,
- std::vector<bool>& stack) override;
+ optional<InvalidConfigReason> init(
+ const std::vector<AtomMatcher>& allAtomMatchers,
+ const std::vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
+ const std::unordered_map<int64_t, int>& matcherMap, std::vector<bool>& stack) override;
- bool onConfigUpdated(const AtomMatcher& matcher, const int index,
- const std::unordered_map<int64_t, int>& atomMatchingTrackerMap) override;
+ optional<InvalidConfigReason> onConfigUpdated(
+ const AtomMatcher& matcher, const int index,
+ const std::unordered_map<int64_t, int>& atomMatchingTrackerMap) override;
void onLogEvent(const LogEvent& event,
const std::vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
diff --git a/statsd/src/matchers/matcher_util.cpp b/statsd/src/matchers/matcher_util.cpp
index 27b9fb2..20450dd 100644
--- a/statsd/src/matchers/matcher_util.cpp
+++ b/statsd/src/matchers/matcher_util.cpp
@@ -16,9 +16,12 @@
#define STATSD_DEBUG false // STOPSHIP if true
#include "Log.h"
-#include "src/statsd_config.pb.h"
-#include "matchers/AtomMatchingTracker.h"
#include "matchers/matcher_util.h"
+
+#include <fnmatch.h>
+
+#include "matchers/AtomMatchingTracker.h"
+#include "src/statsd_config.pb.h"
#include "stats_util.h"
using std::set;
@@ -97,6 +100,33 @@
return false;
}
+bool tryMatchWildcardString(const sp<UidMap>& uidMap, const FieldValue& fieldValue,
+ const string& wildcardPattern) {
+ if (isAttributionUidField(fieldValue) || isUidField(fieldValue)) {
+ int uid = fieldValue.mValue.int_value;
+ // TODO(b/236886985): replace aid/uid mapping with efficient bidirectional container
+ // AidToUidMapping will never have uids above 10000
+ if (uid < 10000) {
+ for (auto aidIt = UidMap::sAidToUidMapping.begin();
+ aidIt != UidMap::sAidToUidMapping.end(); ++aidIt) {
+ if ((int)aidIt->second == uid) {
+ // Assumes there is only one aid mapping for each uid
+ return fnmatch(wildcardPattern.c_str(), aidIt->first.c_str(), 0) == 0;
+ }
+ }
+ }
+ std::set<string> packageNames = uidMap->getAppNamesFromUid(uid, true /* normalize*/);
+ for (const auto& packageName : packageNames) {
+ if (fnmatch(wildcardPattern.c_str(), packageName.c_str(), 0) == 0) {
+ return true;
+ }
+ }
+ } else if (fieldValue.mValue.getType() == STRING) {
+ return fnmatch(wildcardPattern.c_str(), fieldValue.mValue.str_value.c_str(), 0) == 0;
+ }
+ return false;
+}
+
bool matchesSimple(const sp<UidMap>& uidMap, const FieldValueMatcher& matcher,
const vector<FieldValue>& values, int start, int end, int depth) {
if (depth > 2) {
@@ -237,13 +267,18 @@
case FieldValueMatcher::ValueMatcherCase::kNeqAnyString: {
const auto& str_list = matcher.neq_any_string();
for (int i = start; i < end; i++) {
+ bool notEqAll = true;
for (const auto& str : str_list.str_value()) {
if (tryMatchString(uidMap, values[i], str)) {
- return false;
+ notEqAll = false;
+ break;
}
}
+ if (notEqAll) {
+ return true;
+ }
}
- return true;
+ return false;
}
case FieldValueMatcher::ValueMatcherCase::kEqAnyString: {
const auto& str_list = matcher.eq_any_string();
@@ -256,6 +291,41 @@
}
return false;
}
+ case FieldValueMatcher::ValueMatcherCase::kEqWildcardString: {
+ for (int i = start; i < end; i++) {
+ if (tryMatchWildcardString(uidMap, values[i], matcher.eq_wildcard_string())) {
+ return true;
+ }
+ }
+ return false;
+ }
+ case FieldValueMatcher::ValueMatcherCase::kEqAnyWildcardString: {
+ const auto& str_list = matcher.eq_any_wildcard_string();
+ for (int i = start; i < end; i++) {
+ for (const auto& str : str_list.str_value()) {
+ if (tryMatchWildcardString(uidMap, values[i], str)) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+ case FieldValueMatcher::ValueMatcherCase::kNeqAnyWildcardString: {
+ const auto& str_list = matcher.neq_any_wildcard_string();
+ for (int i = start; i < end; i++) {
+ bool notEqAll = true;
+ for (const auto& str : str_list.str_value()) {
+ if (tryMatchWildcardString(uidMap, values[i], str)) {
+ notEqAll = false;
+ break;
+ }
+ }
+ if (notEqAll) {
+ return true;
+ }
+ }
+ return false;
+ }
case FieldValueMatcher::ValueMatcherCase::kEqInt: {
for (int i = start; i < end; i++) {
if (values[i].mValue.getType() == INT &&
@@ -270,6 +340,46 @@
}
return false;
}
+ case FieldValueMatcher::ValueMatcherCase::kEqAnyInt: {
+ const auto& int_list = matcher.eq_any_int();
+ for (int i = start; i < end; i++) {
+ for (const int int_value : int_list.int_value()) {
+ if (values[i].mValue.getType() == INT &&
+ (int_value == values[i].mValue.int_value)) {
+ return true;
+ }
+ // eq_any_int covers both int and long.
+ if (values[i].mValue.getType() == LONG &&
+ (int_value == values[i].mValue.long_value)) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+ case FieldValueMatcher::ValueMatcherCase::kNeqAnyInt: {
+ const auto& int_list = matcher.neq_any_int();
+ for (int i = start; i < end; i++) {
+ bool notEqAll = true;
+ for (const int int_value : int_list.int_value()) {
+ if (values[i].mValue.getType() == INT &&
+ (int_value == values[i].mValue.int_value)) {
+ notEqAll = false;
+ break;
+ }
+ // neq_any_int covers both int and long.
+ if (values[i].mValue.getType() == LONG &&
+ (int_value == values[i].mValue.long_value)) {
+ notEqAll = false;
+ break;
+ }
+ }
+ if (notEqAll) {
+ return true;
+ }
+ }
+ return false;
+ }
case FieldValueMatcher::ValueMatcherCase::kLtInt: {
for (int i = start; i < end; i++) {
if (values[i].mValue.getType() == INT &&
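
The new wildcard matcher cases above delegate pattern matching to fnmatch(3), so shell-style globs such as "com.android.*" can be matched against package names or string field values. A tiny standalone illustration of the call convention (the pattern and package strings are made up):

#include <fnmatch.h>
#include <cstdio>

int main() {
    const char* pattern = "com.android.*";
    const char* pkg = "com.android.settings";
    // fnmatch() returns 0 on a match, which is why matcher_util.cpp compares against 0.
    std::printf("'%s' matches '%s': %s\n", pkg, pattern,
                fnmatch(pattern, pkg, 0) == 0 ? "yes" : "no");
    return 0;
}
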
diff --git a/statsd/src/metrics/CountMetricProducer.cpp b/statsd/src/metrics/CountMetricProducer.cpp
index 67a7250..5829280 100644
--- a/statsd/src/metrics/CountMetricProducer.cpp
+++ b/statsd/src/metrics/CountMetricProducer.cpp
@@ -66,6 +66,7 @@
const int FIELD_ID_BUCKET_NUM = 4;
const int FIELD_ID_START_BUCKET_ELAPSED_MILLIS = 5;
const int FIELD_ID_END_BUCKET_ELAPSED_MILLIS = 6;
+const int FIELD_ID_CONDITION_TRUE_NS = 7;
CountMetricProducer::CountMetricProducer(
const ConfigKey& key, const CountMetric& metric, const int conditionIndex,
@@ -118,8 +119,11 @@
flushIfNeededLocked(startTimeNs);
// Adjust start for partial bucket
mCurrentBucketStartTimeNs = startTimeNs;
+ mConditionTimer.newBucketStart(mCurrentBucketStartTimeNs, mCurrentBucketStartTimeNs);
+ mConditionTimer.onConditionChanged(mIsActive && mCondition == ConditionState::kTrue,
+ mCurrentBucketStartTimeNs);
- VLOG("metric %lld created. bucket size %lld start_time: %lld", (long long)metric.id(),
+ VLOG("metric %lld created. bucket size %lld start_time: %lld", (long long)mMetricId,
(long long)mBucketSizeNs, (long long)mTimeBaseNs);
}
@@ -127,7 +131,7 @@
VLOG("~CountMetricProducer() called");
}
-bool CountMetricProducer::onConfigUpdatedLocked(
+optional<InvalidConfigReason> CountMetricProducer::onConfigUpdatedLocked(
const StatsdConfig& config, const int configIndex, const int metricIndex,
const vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
const unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
@@ -141,31 +145,35 @@
unordered_map<int, vector<int>>& activationAtomTrackerToMetricMap,
unordered_map<int, vector<int>>& deactivationAtomTrackerToMetricMap,
vector<int>& metricsWithActivation) {
- if (!MetricProducer::onConfigUpdatedLocked(
- config, configIndex, metricIndex, allAtomMatchingTrackers,
- oldAtomMatchingTrackerMap, newAtomMatchingTrackerMap, matcherWizard,
- allConditionTrackers, conditionTrackerMap, wizard, metricToActivationMap,
- trackerToMetricMap, conditionToMetricMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, metricsWithActivation)) {
- return false;
+ optional<InvalidConfigReason> invalidConfigReason = MetricProducer::onConfigUpdatedLocked(
+ config, configIndex, metricIndex, allAtomMatchingTrackers, oldAtomMatchingTrackerMap,
+ newAtomMatchingTrackerMap, matcherWizard, allConditionTrackers, conditionTrackerMap,
+ wizard, metricToActivationMap, trackerToMetricMap, conditionToMetricMap,
+ activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
+ metricsWithActivation);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
const CountMetric& metric = config.count_metric(configIndex);
int trackerIndex;
// Update appropriate indices, specifically mConditionIndex and MetricsManager maps.
- if (!handleMetricWithAtomMatchingTrackers(metric.what(), metricIndex, false,
- allAtomMatchingTrackers, newAtomMatchingTrackerMap,
- trackerToMetricMap, trackerIndex)) {
- return false;
+ invalidConfigReason = handleMetricWithAtomMatchingTrackers(
+ metric.what(), mMetricId, metricIndex, false, allAtomMatchingTrackers,
+ newAtomMatchingTrackerMap, trackerToMetricMap, trackerIndex);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
- if (metric.has_condition() &&
- !handleMetricWithConditions(metric.condition(), metricIndex, conditionTrackerMap,
- metric.links(), allConditionTrackers, mConditionTrackerIndex,
- conditionToMetricMap)) {
- return false;
+ if (metric.has_condition()) {
+ invalidConfigReason = handleMetricWithConditions(
+ metric.condition(), mMetricId, metricIndex, conditionTrackerMap, metric.links(),
+ allConditionTrackers, mConditionTrackerIndex, conditionToMetricMap);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
+ }
}
- return true;
+ return nullopt;
}
void CountMetricProducer::onStateChanged(const int64_t eventTimeNs, const int32_t atomId,
@@ -275,6 +283,15 @@
(long long)(getBucketNumFromEndTimeNs(bucket.mBucketEndNs)));
}
protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_COUNT, (long long)bucket.mCount);
+
+ // We only write the condition timer value if the metric has a
+ // condition and isn't sliced by state or condition.
+ // TODO(b/268531179): Slice the condition timer by state and condition
+ if (mConditionTrackerIndex >= 0 && mSlicedStateAtoms.empty() && !mConditionSliced) {
+ protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_CONDITION_TRUE_NS,
+ (long long)bucket.mConditionTrueNs);
+ }
+
protoOutput->end(bucketInfoToken);
VLOG("\t bucket [%lld - %lld] count: %lld", (long long)bucket.mBucketStartNs,
(long long)bucket.mBucketEndNs, (long long)bucket.mCount);
@@ -299,6 +316,12 @@
const int64_t eventTime) {
VLOG("Metric %lld onConditionChanged", (long long)mMetricId);
mCondition = conditionMet ? ConditionState::kTrue : ConditionState::kFalse;
+
+ if (!mIsActive) {
+ return;
+ }
+
+ mConditionTimer.onConditionChanged(mCondition, eventTime);
}
bool CountMetricProducer::hitGuardRailLocked(const MetricDimensionKey& newKey) {
@@ -312,8 +335,11 @@
StatsdStats::getInstance().noteMetricDimensionSize(mConfigKey, mMetricId, newTupleCount);
// 2. Don't add more tuples, we are above the allowed threshold. Drop the data.
if (newTupleCount > StatsdStats::kDimensionKeySizeHardLimit) {
- ALOGE("CountMetric %lld dropping data for dimension key %s",
- (long long)mMetricId, newKey.toString().c_str());
+ if (!mHasHitGuardrail) {
+ ALOGE("CountMetric %lld dropping data for dimension key %s", (long long)mMetricId,
+ newKey.toString().c_str());
+ mHasHitGuardrail = true;
+ }
StatsdStats::getInstance().noteHardDimensionLimitReached(mMetricId);
return true;
}
@@ -408,6 +434,11 @@
} else {
info.mBucketEndNs = fullBucketEndTimeNs;
}
+
+ const auto [globalConditionTrueNs, globalConditionCorrectionNs] =
+ mConditionTimer.newBucketStart(eventTimeNs, nextBucketStartTimeNs);
+ info.mConditionTrueNs = globalConditionTrueNs;
+
for (const auto& counter : *mCurrentSlicedCounter) {
if (countPassesThreshold(counter.second)) {
info.mCount = counter.second;
@@ -450,6 +481,8 @@
// (Do not clear since the old one is still referenced in mAnomalyTrackers).
mCurrentSlicedCounter = std::make_shared<DimToValMap>();
mCurrentBucketStartTimeNs = nextBucketStartTimeNs;
+ // Reset mHasHitGuardrail boolean since bucket was reset
+ mHasHitGuardrail = false;
}
// Rough estimate of CountMetricProducer buffer stored. This number will be
@@ -463,6 +496,17 @@
return totalSize;
}
+void CountMetricProducer::onActiveStateChangedLocked(const int64_t eventTimeNs,
+ const bool isActive) {
+ MetricProducer::onActiveStateChangedLocked(eventTimeNs, isActive);
+
+ if (ConditionState::kTrue != mCondition) {
+ return;
+ }
+
+ mConditionTimer.onConditionChanged(isActive, eventTimeNs);
+}
+
} // namespace statsd
} // namespace os
} // namespace android
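
CountMetricProducer now drives a ConditionTimer at condition changes, activation changes and bucket boundaries so each bucket can report how long its condition was true (FIELD_ID_CONDITION_TRUE_NS). The real API lives in condition/ConditionTimer.h; the class below is a simplified, self-contained sketch of the same bookkeeping, not the actual implementation:

#include <cstdint>
#include <utility>

class SimpleConditionTimer {
public:
    void onConditionChanged(bool newCondition, int64_t timestampNs) {
        if (mCondition && !newCondition) {
            mTrueNs += timestampNs - mLastTrueStartNs;   // condition turned false: bank the time
        } else if (!mCondition && newCondition) {
            mLastTrueStartNs = timestampNs;              // condition turned true: start counting
        }
        mCondition = newCondition;
    }

    // Closes the current bucket and returns the condition-true duration accumulated in it.
    int64_t newBucketStart(int64_t bucketEndNs) {
        if (mCondition) {
            mTrueNs += bucketEndNs - mLastTrueStartNs;
            mLastTrueStartNs = bucketEndNs;
        }
        return std::exchange(mTrueNs, 0);
    }

private:
    bool mCondition = false;
    int64_t mLastTrueStartNs = 0;
    int64_t mTrueNs = 0;
};
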
diff --git a/statsd/src/metrics/CountMetricProducer.h b/statsd/src/metrics/CountMetricProducer.h
index 9398467..3401d74 100644
--- a/statsd/src/metrics/CountMetricProducer.h
+++ b/statsd/src/metrics/CountMetricProducer.h
@@ -24,9 +24,10 @@
#include "MetricProducer.h"
#include "anomaly/AnomalyTracker.h"
+#include "condition/ConditionTimer.h"
#include "condition/ConditionTracker.h"
-#include "src/statsd_config.pb.h"
#include "matchers/matcher_util.h"
+#include "src/statsd_config.pb.h"
#include "stats_util.h"
namespace android {
@@ -37,6 +38,7 @@
int64_t mBucketStartNs;
int64_t mBucketEndNs;
int64_t mCount;
+ int64_t mConditionTrueNs;
};
class CountMetricProducer : public MetricProducer {
@@ -97,7 +99,9 @@
void flushCurrentBucketLocked(const int64_t& eventTimeNs,
const int64_t& nextBucketStartTimeNs) override;
- bool onConfigUpdatedLocked(
+ void onActiveStateChangedLocked(const int64_t eventTimeNs, const bool isActive) override;
+
+ optional<InvalidConfigReason> onConfigUpdatedLocked(
const StatsdConfig& config, const int configIndex, const int metricIndex,
const std::vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
const std::unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
diff --git a/statsd/src/metrics/DurationMetricProducer.cpp b/statsd/src/metrics/DurationMetricProducer.cpp
index 5b7a1c1..8efe96e 100644
--- a/statsd/src/metrics/DurationMetricProducer.cpp
+++ b/statsd/src/metrics/DurationMetricProducer.cpp
@@ -64,6 +64,7 @@
const int FIELD_ID_BUCKET_NUM = 4;
const int FIELD_ID_START_BUCKET_ELAPSED_MILLIS = 5;
const int FIELD_ID_END_BUCKET_ELAPSED_MILLIS = 6;
+const int FIELD_ID_CONDITION_TRUE_NS = 7;
DurationMetricProducer::DurationMetricProducer(
const ConfigKey& key, const DurationMetric& metric, const int conditionIndex,
@@ -114,6 +115,7 @@
// Dimensions in what must be subset of internal dimensions
if (!subsetDimensions(mDimensionsInWhat, mInternalDimensions)) {
ALOGE("Dimensions in what must be a subset of the internal dimensions");
+ // TODO: Add invalidConfigReason
mValid = false;
}
@@ -127,6 +129,7 @@
translateFieldMatcher(link.fields_in_condition(), &mc.conditionFields);
if (!subsetDimensions(mc.metricFields, mInternalDimensions)) {
ALOGE(("Condition links must be a subset of the internal dimensions"));
+ // TODO: Add invalidConfigReason
mValid = false;
}
mMetric2ConditionLinks.push_back(mc);
@@ -142,6 +145,7 @@
translateFieldMatcher(stateLink.fields_in_state(), &ms.stateFields);
if (!subsetDimensions(ms.metricFields, mInternalDimensions)) {
ALOGE(("State links must be a subset of the dimensions in what internal dimensions"));
+ // TODO: Add invalidConfigReason
mValid = false;
}
mMetric2StateLinks.push_back(ms);
@@ -156,17 +160,20 @@
flushIfNeededLocked(startTimeNs);
// Adjust start for partial bucket
mCurrentBucketStartTimeNs = startTimeNs;
- VLOG("metric %lld created. bucket size %lld start_time: %lld", (long long)metric.id(),
+ VLOG("metric %lld created. bucket size %lld start_time: %lld", (long long)mMetricId,
(long long)mBucketSizeNs, (long long)mTimeBaseNs);
initTrueDimensions(whatIndex, startTimeNs);
+ mConditionTimer.newBucketStart(mCurrentBucketStartTimeNs, mCurrentBucketStartTimeNs);
+ mConditionTimer.onConditionChanged(mIsActive && mCondition == ConditionState::kTrue,
+ mCurrentBucketStartTimeNs);
}
DurationMetricProducer::~DurationMetricProducer() {
VLOG("~DurationMetric() called");
}
-bool DurationMetricProducer::onConfigUpdatedLocked(
+optional<InvalidConfigReason> DurationMetricProducer::onConfigUpdatedLocked(
const StatsdConfig& config, const int configIndex, const int metricIndex,
const vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
const unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
@@ -180,68 +187,76 @@
unordered_map<int, vector<int>>& activationAtomTrackerToMetricMap,
unordered_map<int, vector<int>>& deactivationAtomTrackerToMetricMap,
vector<int>& metricsWithActivation) {
- if (!MetricProducer::onConfigUpdatedLocked(
- config, configIndex, metricIndex, allAtomMatchingTrackers,
- oldAtomMatchingTrackerMap, newAtomMatchingTrackerMap, matcherWizard,
- allConditionTrackers, conditionTrackerMap, wizard, metricToActivationMap,
- trackerToMetricMap, conditionToMetricMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, metricsWithActivation)) {
- return false;
+ optional<InvalidConfigReason> invalidConfigReason = MetricProducer::onConfigUpdatedLocked(
+ config, configIndex, metricIndex, allAtomMatchingTrackers, oldAtomMatchingTrackerMap,
+ newAtomMatchingTrackerMap, matcherWizard, allConditionTrackers, conditionTrackerMap,
+ wizard, metricToActivationMap, trackerToMetricMap, conditionToMetricMap,
+ activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
+ metricsWithActivation);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
const DurationMetric& metric = config.duration_metric(configIndex);
const auto& what_it = conditionTrackerMap.find(metric.what());
if (what_it == conditionTrackerMap.end()) {
ALOGE("DurationMetric's \"what\" is not present in the config");
- return false;
+ return createInvalidConfigReasonWithPredicate(
+ INVALID_CONFIG_REASON_DURATION_METRIC_WHAT_NOT_FOUND, mMetricId, metric.what());
}
const Predicate& durationWhat = config.predicate(what_it->second);
if (durationWhat.contents_case() != Predicate::ContentsCase::kSimplePredicate) {
ALOGE("DurationMetric's \"what\" must be a simple condition");
- return false;
+ return createInvalidConfigReasonWithPredicate(
+ INVALID_CONFIG_REASON_DURATION_METRIC_WHAT_NOT_SIMPLE, mMetricId, metric.what());
}
const SimplePredicate& simplePredicate = durationWhat.simple_predicate();
// Update indices: mStartIndex, mStopIndex, mStopAllIndex, mConditionIndex and MetricsManager
// maps.
- if (!handleMetricWithAtomMatchingTrackers(simplePredicate.start(), metricIndex,
- metric.has_dimensions_in_what(),
- allAtomMatchingTrackers, newAtomMatchingTrackerMap,
- trackerToMetricMap, mStartIndex)) {
+ invalidConfigReason = handleMetricWithAtomMatchingTrackers(
+ simplePredicate.start(), mMetricId, metricIndex, metric.has_dimensions_in_what(),
+ allAtomMatchingTrackers, newAtomMatchingTrackerMap, trackerToMetricMap, mStartIndex);
+ if (invalidConfigReason.has_value()) {
ALOGE("Duration metrics must specify a valid start event matcher");
- return false;
+ return invalidConfigReason;
}
- if (simplePredicate.has_stop() &&
- !handleMetricWithAtomMatchingTrackers(simplePredicate.stop(), metricIndex,
- metric.has_dimensions_in_what(),
- allAtomMatchingTrackers, newAtomMatchingTrackerMap,
- trackerToMetricMap, mStopIndex)) {
- return false;
+ if (simplePredicate.has_stop()) {
+ invalidConfigReason = handleMetricWithAtomMatchingTrackers(
+ simplePredicate.stop(), mMetricId, metricIndex, metric.has_dimensions_in_what(),
+ allAtomMatchingTrackers, newAtomMatchingTrackerMap, trackerToMetricMap, mStopIndex);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
+ }
}
- if (simplePredicate.has_stop_all() &&
- !handleMetricWithAtomMatchingTrackers(simplePredicate.stop_all(), metricIndex,
- metric.has_dimensions_in_what(),
- allAtomMatchingTrackers, newAtomMatchingTrackerMap,
- trackerToMetricMap, mStopAllIndex)) {
- return false;
+ if (simplePredicate.has_stop_all()) {
+ invalidConfigReason = handleMetricWithAtomMatchingTrackers(
+ simplePredicate.stop_all(), mMetricId, metricIndex, metric.has_dimensions_in_what(),
+ allAtomMatchingTrackers, newAtomMatchingTrackerMap, trackerToMetricMap,
+ mStopAllIndex);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
+ }
}
- if (metric.has_condition() &&
- !handleMetricWithConditions(metric.condition(), metricIndex, conditionTrackerMap,
- metric.links(), allConditionTrackers, mConditionTrackerIndex,
- conditionToMetricMap)) {
- return false;
+ if (metric.has_condition()) {
+ invalidConfigReason = handleMetricWithConditions(
+ metric.condition(), mMetricId, metricIndex, conditionTrackerMap, metric.links(),
+ allConditionTrackers, mConditionTrackerIndex, conditionToMetricMap);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
+ }
}
for (const auto& it : mCurrentSlicedDurationTrackerMap) {
it.second->onConfigUpdated(wizard, mConditionTrackerIndex);
}
- return true;
+ return nullopt;
}
void DurationMetricProducer::initTrueDimensions(const int whatIndex, const int64_t startTimeNs) {
@@ -344,8 +359,7 @@
// SlicedConditionChange optimization case 1:
// 1. If combination condition, logical operation is AND, only one sliced child predicate.
// 2. The links cover all dimension fields in the sliced child condition predicate.
-void DurationMetricProducer::onSlicedConditionMayChangeLocked_opt1(bool condition,
- const int64_t eventTime) {
+void DurationMetricProducer::onSlicedConditionMayChangeLocked_opt1(const int64_t eventTime) {
if (mMetric2ConditionLinks.size() != 1 ||
!mHasLinksToAllConditionDimensionsInTracker) {
return;
@@ -403,17 +417,16 @@
}
}
-void DurationMetricProducer::onSlicedConditionMayChangeInternalLocked(bool overallCondition,
- const int64_t eventTimeNs) {
+void DurationMetricProducer::onSlicedConditionMayChangeInternalLocked(const int64_t eventTimeNs) {
bool changeDimTrackable = mWizard->IsChangedDimensionTrackable(mConditionTrackerIndex);
if (changeDimTrackable && mHasLinksToAllConditionDimensionsInTracker) {
- onSlicedConditionMayChangeLocked_opt1(overallCondition, eventTimeNs);
+ onSlicedConditionMayChangeLocked_opt1(eventTimeNs);
return;
}
// Now, for each ongoing event, check whether its condition has changed.
for (auto& whatIt : mCurrentSlicedDurationTrackerMap) {
- whatIt.second->onSlicedConditionMayChange(overallCondition, eventTimeNs);
+ whatIt.second->onSlicedConditionMayChange(eventTimeNs);
}
}
@@ -431,30 +444,32 @@
return;
}
- onSlicedConditionMayChangeInternalLocked(overallCondition, eventTime);
+ onSlicedConditionMayChangeInternalLocked(eventTime);
}
-void DurationMetricProducer::onActiveStateChangedLocked(const int64_t eventTimeNs) {
- MetricProducer::onActiveStateChangedLocked(eventTimeNs);
+void DurationMetricProducer::onActiveStateChangedLocked(const int64_t eventTimeNs,
+ const bool isActive) {
+ MetricProducer::onActiveStateChangedLocked(eventTimeNs, isActive);
if (!mConditionSliced) {
if (ConditionState::kTrue != mCondition) {
return;
}
- if (mIsActive) {
+ if (isActive) {
flushIfNeededLocked(eventTimeNs);
}
for (auto& whatIt : mCurrentSlicedDurationTrackerMap) {
- whatIt.second->onConditionChanged(mIsActive, eventTimeNs);
+ whatIt.second->onConditionChanged(isActive, eventTimeNs);
}
- } else if (mIsActive) {
+ mConditionTimer.onConditionChanged(isActive, eventTimeNs);
+ } else if (isActive) {
flushIfNeededLocked(eventTimeNs);
- onSlicedConditionMayChangeInternalLocked(mIsActive, eventTimeNs);
- } else { // mConditionSliced == true && !mIsActive
+ onSlicedConditionMayChangeInternalLocked(eventTimeNs);
+ } else { // mConditionSliced == true && !isActive
for (auto& whatIt : mCurrentSlicedDurationTrackerMap) {
- whatIt.second->onConditionChanged(mIsActive, eventTimeNs);
+ whatIt.second->onConditionChanged(isActive, eventTimeNs);
}
}
}
@@ -472,6 +487,8 @@
for (auto& whatIt : mCurrentSlicedDurationTrackerMap) {
whatIt.second->onConditionChanged(conditionMet, eventTime);
}
+
+ mConditionTimer.onConditionChanged(mCondition, eventTime);
}
void DurationMetricProducer::dropDataLocked(const int64_t dropTimeNs) {
@@ -558,6 +575,15 @@
(long long)(getBucketNumFromEndTimeNs(bucket.mBucketEndNs)));
}
protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_DURATION, (long long)bucket.mDuration);
+
+ // We only write the condition timer value if the metric has a
+ // condition and isn't sliced by state or condition.
+ // TODO(b/268531762): Slice the condition timer by state and condition
+ if (mConditionTrackerIndex >= 0 && mSlicedStateAtoms.empty() && !mConditionSliced) {
+ protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_CONDITION_TRUE_NS,
+ (long long)bucket.mConditionTrueNs);
+ }
+
protoOutput->end(bucketInfoToken);
VLOG("\t bucket [%lld - %lld] duration: %lld", (long long)bucket.mBucketStartNs,
(long long)bucket.mBucketEndNs, (long long)bucket.mDuration);
@@ -588,17 +614,24 @@
void DurationMetricProducer::flushCurrentBucketLocked(const int64_t& eventTimeNs,
const int64_t& nextBucketStartTimeNs) {
+ const auto [globalConditionTrueNs, globalConditionCorrectionNs] =
+ mConditionTimer.newBucketStart(eventTimeNs, nextBucketStartTimeNs);
+
for (auto whatIt = mCurrentSlicedDurationTrackerMap.begin();
whatIt != mCurrentSlicedDurationTrackerMap.end();) {
- if (whatIt->second->flushCurrentBucket(eventTimeNs, mUploadThreshold, &mPastBuckets)) {
+ if (whatIt->second->flushCurrentBucket(eventTimeNs, mUploadThreshold, globalConditionTrueNs,
+ &mPastBuckets)) {
VLOG("erase bucket for key %s", whatIt->first.toString().c_str());
whatIt = mCurrentSlicedDurationTrackerMap.erase(whatIt);
} else {
++whatIt;
}
}
+
StatsdStats::getInstance().noteBucketCount(mMetricId);
mCurrentBucketStartTimeNs = nextBucketStartTimeNs;
+ // Reset mHasHitGuardrail boolean since bucket was reset
+ mHasHitGuardrail = false;
}
void DurationMetricProducer::dumpStatesLocked(FILE* out, bool verbose) const {
@@ -626,8 +659,11 @@
mConfigKey, mMetricId, newTupleCount);
// 2. Don't add more tuples, we are above the allowed threshold. Drop the data.
if (newTupleCount > StatsdStats::kDimensionKeySizeHardLimit) {
- ALOGE("DurationMetric %lld dropping data for what dimension key %s",
- (long long)mMetricId, newKey.getDimensionKeyInWhat().toString().c_str());
+ if (!mHasHitGuardrail) {
+ ALOGE("DurationMetric %lld dropping data for what dimension key %s",
+ (long long)mMetricId, newKey.getDimensionKeyInWhat().toString().c_str());
+ mHasHitGuardrail = true;
+ }
StatsdStats::getInstance().noteHardDimensionLimitReached(mMetricId);
return true;
}
@@ -696,6 +732,10 @@
return;
}
+ if (!passesSampleCheckLocked(values)) {
+ return;
+ }
+
HashableDimensionKey dimensionInWhat = DEFAULT_DIMENSION_KEY;
if (!mDimensionsInWhat.empty()) {
filterValues(mDimensionsInWhat, values, &dimensionInWhat);
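Note on the ConditionTimer wiring above: the constructor now seeds the timer at bucket start, condition and active-state changes feed it, and flushCurrentBucketLocked() closes it with newBucketStart() to obtain the per-bucket condition-true duration. A minimal sketch of such an accumulator, assuming a simplified interface (the method names mirror the calls in the diff; the internals here are illustrative, not the statsd ConditionTimer):

    #include <cstdint>
    #include <utility>

    // Illustrative condition-true accumulator, not the statsd implementation.
    class SimpleConditionTimer {
    public:
        SimpleConditionTimer(bool initialCondition, int64_t bucketStartNs)
            : mCondition(initialCondition), mLastChangeNs(bucketStartNs) {}

        void onConditionChanged(bool condition, int64_t nowNs) {
            if (condition == mCondition) return;
            if (mCondition) mAccumulatedNs += nowNs - mLastChangeNs;  // close the open true-interval
            mCondition = condition;
            mLastChangeNs = nowNs;
        }

        // Returns the condition-true time for the closed bucket and starts the next one.
        std::pair<int64_t, int64_t> newBucketStart(int64_t eventNs, int64_t nextBucketStartNs) {
            int64_t trueNs = mAccumulatedNs;
            if (mCondition) trueNs += eventNs - mLastChangeNs;  // include the still-open interval
            mAccumulatedNs = 0;
            mLastChangeNs = nextBucketStartNs;
            return {trueNs, 0};  // the real timer also reports a correction term; 0 here for simplicity
        }

    private:
        bool mCondition;
        int64_t mLastChangeNs;
        int64_t mAccumulatedNs = 0;
    };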
diff --git a/statsd/src/metrics/DurationMetricProducer.h b/statsd/src/metrics/DurationMetricProducer.h
index 206882b..2a9f325 100644
--- a/statsd/src/metrics/DurationMetricProducer.h
+++ b/statsd/src/metrics/DurationMetricProducer.h
@@ -99,16 +99,14 @@
void onConditionChangedLocked(const bool conditionMet, const int64_t eventTime) override;
// Internal interface to handle active state change.
- void onActiveStateChangedLocked(const int64_t eventTimeNs) override;
+ void onActiveStateChangedLocked(const int64_t eventTimeNs, const bool isActive) override;
// Internal interface to handle sliced condition change.
void onSlicedConditionMayChangeLocked(bool overallCondition, const int64_t eventTime) override;
- void onSlicedConditionMayChangeInternalLocked(bool overallCondition,
- const int64_t eventTimeNs);
+ void onSlicedConditionMayChangeInternalLocked(const int64_t eventTimeNs);
- void onSlicedConditionMayChangeLocked_opt1(bool overallCondition, const int64_t eventTime);
- void onSlicedConditionMayChangeLocked_opt2(bool overallCondition, const int64_t eventTime);
+ void onSlicedConditionMayChangeLocked_opt1(const int64_t eventTime);
// Internal function to calculate the current used bytes.
size_t byteSizeLocked() const override;
@@ -123,7 +121,7 @@
void flushCurrentBucketLocked(const int64_t& eventTimeNs,
const int64_t& nextBucketStartTimeNs) override;
- bool onConfigUpdatedLocked(
+ optional<InvalidConfigReason> onConfigUpdatedLocked(
const StatsdConfig& config, const int configIndex, const int metricIndex,
const std::vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
const std::unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
diff --git a/statsd/src/metrics/EventMetricProducer.cpp b/statsd/src/metrics/EventMetricProducer.cpp
index 502ea8a..dd67919 100644
--- a/statsd/src/metrics/EventMetricProducer.cpp
+++ b/statsd/src/metrics/EventMetricProducer.cpp
@@ -78,7 +78,7 @@
mConditionSliced = true;
}
mTotalSize = 0;
- VLOG("metric %lld created. bucket size %lld start_time: %lld", (long long)metric.id(),
+ VLOG("metric %lld created. bucket size %lld start_time: %lld", (long long)mMetricId,
(long long)mBucketSizeNs, (long long)mTimeBaseNs);
}
@@ -86,7 +86,7 @@
VLOG("~EventMetricProducer() called");
}
-bool EventMetricProducer::onConfigUpdatedLocked(
+optional<InvalidConfigReason> EventMetricProducer::onConfigUpdatedLocked(
const StatsdConfig& config, const int configIndex, const int metricIndex,
const vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
const unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
@@ -100,31 +100,35 @@
unordered_map<int, vector<int>>& activationAtomTrackerToMetricMap,
unordered_map<int, vector<int>>& deactivationAtomTrackerToMetricMap,
vector<int>& metricsWithActivation) {
- if (!MetricProducer::onConfigUpdatedLocked(
- config, configIndex, metricIndex, allAtomMatchingTrackers,
- oldAtomMatchingTrackerMap, newAtomMatchingTrackerMap, matcherWizard,
- allConditionTrackers, conditionTrackerMap, wizard, metricToActivationMap,
- trackerToMetricMap, conditionToMetricMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, metricsWithActivation)) {
- return false;
+ optional<InvalidConfigReason> invalidConfigReason = MetricProducer::onConfigUpdatedLocked(
+ config, configIndex, metricIndex, allAtomMatchingTrackers, oldAtomMatchingTrackerMap,
+ newAtomMatchingTrackerMap, matcherWizard, allConditionTrackers, conditionTrackerMap,
+ wizard, metricToActivationMap, trackerToMetricMap, conditionToMetricMap,
+ activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
+ metricsWithActivation);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
const EventMetric& metric = config.event_metric(configIndex);
int trackerIndex;
// Update appropriate indices, specifically mConditionIndex and MetricsManager maps.
- if (!handleMetricWithAtomMatchingTrackers(metric.what(), metricIndex, false,
- allAtomMatchingTrackers, newAtomMatchingTrackerMap,
- trackerToMetricMap, trackerIndex)) {
- return false;
+ invalidConfigReason = handleMetricWithAtomMatchingTrackers(
+ metric.what(), mMetricId, metricIndex, false, allAtomMatchingTrackers,
+ newAtomMatchingTrackerMap, trackerToMetricMap, trackerIndex);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
- if (metric.has_condition() &&
- !handleMetricWithConditions(metric.condition(), metricIndex, conditionTrackerMap,
- metric.links(), allConditionTrackers, mConditionTrackerIndex,
- conditionToMetricMap)) {
- return false;
+ if (metric.has_condition()) {
+ invalidConfigReason = handleMetricWithConditions(
+ metric.condition(), mMetricId, metricIndex, conditionTrackerMap, metric.links(),
+ allConditionTrackers, mConditionTrackerIndex, conditionToMetricMap);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
+ }
}
- return true;
+ return nullopt;
}
void EventMetricProducer::dropDataLocked(const int64_t dropTimeNs) {
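The onConfigUpdatedLocked() rewrites above (Duration, Event, and the other producers below) all follow the same shape: each validation step returns optional<InvalidConfigReason>, the first populated value is returned immediately, and nullopt signals success. A compact sketch of that shape, with a hypothetical Reason type standing in for InvalidConfigReason:

    #include <iostream>
    #include <optional>
    #include <string>

    struct Reason {  // hypothetical stand-in for InvalidConfigReason
        int code;
        std::string detail;
    };

    std::optional<Reason> checkWhat(bool ok) {
        if (ok) return std::nullopt;
        return Reason{1, "what matcher missing"};
    }

    std::optional<Reason> checkCondition(bool ok) {
        if (ok) return std::nullopt;
        return Reason{2, "condition link invalid"};
    }

    // First failing step wins; nullopt means the update succeeded.
    std::optional<Reason> onConfigUpdatedSketch(bool whatOk, bool condOk) {
        if (auto reason = checkWhat(whatOk); reason.has_value()) return reason;
        if (auto reason = checkCondition(condOk); reason.has_value()) return reason;
        return std::nullopt;
    }

    int main() {
        if (auto reason = onConfigUpdatedSketch(true, false)) {
            std::cout << "invalid config: " << reason->code << " " << reason->detail << "\n";
        }
    }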
diff --git a/statsd/src/metrics/EventMetricProducer.h b/statsd/src/metrics/EventMetricProducer.h
index 76747b6..c897305 100644
--- a/statsd/src/metrics/EventMetricProducer.h
+++ b/statsd/src/metrics/EventMetricProducer.h
@@ -50,6 +50,9 @@
return METRIC_TYPE_EVENT;
}
+protected:
+ size_t mTotalSize;
+
private:
void onMatchedLogEventInternalLocked(
const size_t matcherIndex, const MetricDimensionKey& eventKey,
@@ -70,7 +73,7 @@
// Internal interface to handle sliced condition change.
void onSlicedConditionMayChangeLocked(bool overallCondition, const int64_t eventTime) override;
- bool onConfigUpdatedLocked(
+ optional<InvalidConfigReason> onConfigUpdatedLocked(
const StatsdConfig& config, const int configIndex, const int metricIndex,
const std::vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
const std::unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
@@ -95,8 +98,6 @@
// Maps the field/value pairs of an atom to a list of timestamps used to deduplicate atoms.
std::unordered_map<AtomDimensionKey, std::vector<int64_t>> mAggregatedAtoms;
-
- size_t mTotalSize;
};
} // namespace statsd
diff --git a/statsd/src/metrics/GaugeMetricProducer.cpp b/statsd/src/metrics/GaugeMetricProducer.cpp
index 4df3e24..feb70a9 100644
--- a/statsd/src/metrics/GaugeMetricProducer.cpp
+++ b/statsd/src/metrics/GaugeMetricProducer.cpp
@@ -93,6 +93,7 @@
mAtomId(atomId),
mIsPulled(pullTagId != -1),
mMinBucketSizeNs(metric.min_bucket_size_nanos()),
+ mSamplingType(metric.sampling_type()),
mMaxPullDelayNs(metric.max_pull_delay_sec() > 0 ? metric.max_pull_delay_sec() * NS_PER_SEC
: StatsdStats::kPullMaxDelayNs),
mDimensionSoftLimit(dimensionSoftLimit),
@@ -108,7 +109,6 @@
}
mBucketSizeNs = bucketSizeMills * 1000000;
- mSamplingType = metric.sampling_type();
if (!metric.gauge_fields_filter().include_all()) {
translateFieldMatcher(metric.gauge_fields_filter().fields(), &mFieldMatchers);
}
@@ -132,7 +132,7 @@
flushIfNeededLocked(startTimeNs);
// Kicks off the puller immediately.
- if (mIsPulled && mSamplingType == GaugeMetric::RANDOM_ONE_SAMPLE) {
+ if (mIsPulled && isRandomNSamples()) {
mPullerManager->RegisterReceiver(mPullTagId, mConfigKey, this, getCurrentBucketEndTimeNs(),
mBucketSizeNs);
}
@@ -141,18 +141,17 @@
mCurrentBucketStartTimeNs = startTimeNs;
VLOG("Gauge metric %lld created. bucket size %lld start_time: %lld sliced %d",
- (long long)metric.id(), (long long)mBucketSizeNs, (long long)mTimeBaseNs,
- mConditionSliced);
+ (long long)mMetricId, (long long)mBucketSizeNs, (long long)mTimeBaseNs, mConditionSliced);
}
GaugeMetricProducer::~GaugeMetricProducer() {
VLOG("~GaugeMetricProducer() called");
- if (mIsPulled && mSamplingType == GaugeMetric::RANDOM_ONE_SAMPLE) {
+ if (mIsPulled && isRandomNSamples()) {
mPullerManager->UnRegisterReceiver(mPullTagId, mConfigKey, this);
}
}
-bool GaugeMetricProducer::onConfigUpdatedLocked(
+optional<InvalidConfigReason> GaugeMetricProducer::onConfigUpdatedLocked(
const StatsdConfig& config, const int configIndex, const int metricIndex,
const vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
const unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
@@ -166,48 +165,55 @@
unordered_map<int, vector<int>>& activationAtomTrackerToMetricMap,
unordered_map<int, vector<int>>& deactivationAtomTrackerToMetricMap,
vector<int>& metricsWithActivation) {
- if (!MetricProducer::onConfigUpdatedLocked(
- config, configIndex, metricIndex, allAtomMatchingTrackers,
- oldAtomMatchingTrackerMap, newAtomMatchingTrackerMap, matcherWizard,
- allConditionTrackers, conditionTrackerMap, wizard, metricToActivationMap,
- trackerToMetricMap, conditionToMetricMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, metricsWithActivation)) {
- return false;
+ optional<InvalidConfigReason> invalidConfigReason = MetricProducer::onConfigUpdatedLocked(
+ config, configIndex, metricIndex, allAtomMatchingTrackers, oldAtomMatchingTrackerMap,
+ newAtomMatchingTrackerMap, matcherWizard, allConditionTrackers, conditionTrackerMap,
+ wizard, metricToActivationMap, trackerToMetricMap, conditionToMetricMap,
+ activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
+ metricsWithActivation);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
const GaugeMetric& metric = config.gauge_metric(configIndex);
// Update appropriate indices: mWhatMatcherIndex, mConditionIndex and MetricsManager maps.
- if (!handleMetricWithAtomMatchingTrackers(metric.what(), metricIndex, /*enforceOneAtom=*/false,
- allAtomMatchingTrackers, newAtomMatchingTrackerMap,
- trackerToMetricMap, mWhatMatcherIndex)) {
- return false;
+ invalidConfigReason = handleMetricWithAtomMatchingTrackers(
+ metric.what(), mMetricId, metricIndex, /*enforceOneAtom=*/false,
+ allAtomMatchingTrackers, newAtomMatchingTrackerMap, trackerToMetricMap,
+ mWhatMatcherIndex);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
// Need to update maps since the index changed, but mTriggerAtomId will not change.
int triggerTrackerIndex;
- if (metric.has_trigger_event() &&
- !handleMetricWithAtomMatchingTrackers(metric.trigger_event(), metricIndex,
- /*enforceOneAtom=*/true, allAtomMatchingTrackers,
- newAtomMatchingTrackerMap, trackerToMetricMap,
- triggerTrackerIndex)) {
- return false;
+ if (metric.has_trigger_event()) {
+ invalidConfigReason = handleMetricWithAtomMatchingTrackers(
+ metric.trigger_event(), mMetricId, metricIndex,
+ /*enforceOneAtom=*/true, allAtomMatchingTrackers, newAtomMatchingTrackerMap,
+ trackerToMetricMap, triggerTrackerIndex);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
+ }
}
- if (metric.has_condition() &&
- !handleMetricWithConditions(metric.condition(), metricIndex, conditionTrackerMap,
- metric.links(), allConditionTrackers, mConditionTrackerIndex,
- conditionToMetricMap)) {
- return false;
+ if (metric.has_condition()) {
+ invalidConfigReason = handleMetricWithConditions(
+ metric.condition(), mMetricId, metricIndex, conditionTrackerMap, metric.links(),
+ allConditionTrackers, mConditionTrackerIndex, conditionToMetricMap);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
+ }
}
sp<EventMatcherWizard> tmpEventWizard = mEventMatcherWizard;
mEventMatcherWizard = matcherWizard;
// If this is a config update, we must have just forced a partial bucket. Pull if needed to get
// data for the new bucket.
- if (mIsActive && mIsPulled && mSamplingType == GaugeMetric::RANDOM_ONE_SAMPLE) {
+ if (mCondition == ConditionState::kTrue && mIsActive && mIsPulled && isRandomNSamples()) {
pullAndMatchEventsLocked(mCurrentBucketStartTimeNs);
}
- return true;
+ return nullopt;
}
void GaugeMetricProducer::dumpStatesLocked(FILE* out, bool verbose) const {
@@ -356,26 +362,24 @@
}
void GaugeMetricProducer::prepareFirstBucketLocked() {
- if (mIsActive && mIsPulled && mSamplingType == GaugeMetric::RANDOM_ONE_SAMPLE) {
+ if (mCondition == ConditionState::kTrue && mIsActive && mIsPulled && isRandomNSamples()) {
pullAndMatchEventsLocked(mCurrentBucketStartTimeNs);
}
}
+// Only call if mCondition == ConditionState::kTrue && metric is active.
void GaugeMetricProducer::pullAndMatchEventsLocked(const int64_t timestampNs) {
bool triggerPuller = false;
switch(mSamplingType) {
// When the metric wants to do random sampling and there is already one gauge atom for the
// current bucket, do not do it again.
case GaugeMetric::RANDOM_ONE_SAMPLE: {
- triggerPuller = mCondition == ConditionState::kTrue && mCurrentSlicedBucket->empty();
+ triggerPuller = mCurrentSlicedBucket->empty();
break;
}
- case GaugeMetric::CONDITION_CHANGE_TO_TRUE: {
- triggerPuller = mCondition == ConditionState::kTrue;
- break;
- }
+ case GaugeMetric::CONDITION_CHANGE_TO_TRUE:
case GaugeMetric::FIRST_N_SAMPLES: {
- triggerPuller = mCondition == ConditionState::kTrue;
+ triggerPuller = true;
break;
}
default:
@@ -406,12 +410,15 @@
}
}
-void GaugeMetricProducer::onActiveStateChangedLocked(const int64_t eventTimeNs) {
- MetricProducer::onActiveStateChangedLocked(eventTimeNs);
- if (ConditionState::kTrue != mCondition || !mIsPulled) {
+void GaugeMetricProducer::onActiveStateChangedLocked(const int64_t eventTimeNs,
+ const bool isActive) {
+ MetricProducer::onActiveStateChangedLocked(eventTimeNs, isActive);
+
+ if (ConditionState::kTrue != mCondition) {
return;
}
- if (mTriggerAtomId == -1 || (mIsActive && mSamplingType == GaugeMetric::RANDOM_ONE_SAMPLE)) {
+
+ if (isActive && mIsPulled && isRandomNSamples()) {
pullAndMatchEventsLocked(eventTimeNs);
}
}
@@ -426,7 +433,8 @@
}
flushIfNeededLocked(eventTimeNs);
- if (mIsPulled && mTriggerAtomId == -1) {
+ if (conditionMet && mIsPulled &&
+ (isRandomNSamples() || mSamplingType == GaugeMetric::CONDITION_CHANGE_TO_TRUE)) {
pullAndMatchEventsLocked(eventTimeNs);
} // else: Push mode. No need to proactively pull the gauge data.
}
@@ -443,7 +451,7 @@
flushIfNeededLocked(eventTimeNs);
// If the condition is sliced, mCondition is true if any of the dimensions is true. And we will
// pull for every dimension.
- if (mIsPulled && mTriggerAtomId == -1) {
+ if (overallCondition && mIsPulled && mTriggerAtomId == -1) {
pullAndMatchEventsLocked(eventTimeNs);
} // else: Push mode. No need to proactively pull the gauge data.
}
@@ -472,9 +480,9 @@
}
void GaugeMetricProducer::onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& allData,
- bool pullSuccess, int64_t originalPullTimeNs) {
+ PullResult pullResult, int64_t originalPullTimeNs) {
std::lock_guard<std::mutex> lock(mMutex);
- if (!pullSuccess || allData.size() == 0) {
+ if (pullResult != PullResult::PULL_RESULT_SUCCESS || allData.size() == 0) {
return;
}
const int64_t pullDelayNs = getElapsedRealtimeNs() - originalPullTimeNs;
@@ -502,8 +510,11 @@
StatsdStats::getInstance().noteMetricDimensionSize(mConfigKey, mMetricId, newTupleCount);
// 2. Don't add more tuples, we are above the allowed threshold. Drop the data.
if (newTupleCount > mDimensionHardLimit) {
- ALOGE("GaugeMetric %lld dropping data for dimension key %s",
- (long long)mMetricId, newKey.toString().c_str());
+ if (!mHasHitGuardrail) {
+ ALOGE("GaugeMetric %lld dropping data for dimension key %s", (long long)mMetricId,
+ newKey.toString().c_str());
+ mHasHitGuardrail = true;
+ }
StatsdStats::getInstance().noteHardDimensionLimitReached(mMetricId);
return true;
}
@@ -528,6 +539,9 @@
flushIfNeededLocked(eventTimeNs);
if (mTriggerAtomId == event.GetTagId()) {
+ // Both Active state and Condition are true here.
+ // Active state being true is checked in onMatchedLogEventLocked.
+ // Condition being true is checked at the start of this method.
pullAndMatchEventsLocked(eventTimeNs);
return;
}
@@ -638,7 +652,7 @@
VLOG("Gauge gauge metric %lld, dump key value: %s", (long long)mMetricId,
slice.first.toString().c_str());
}
- } else {
+ } else if (mIsActive) {
mCurrentSkippedBucket.bucketStartTimeNs = mCurrentBucketStartTimeNs;
mCurrentSkippedBucket.bucketEndTimeNs = bucketEndTime;
if (!maxDropEventsReached()) {
@@ -665,6 +679,8 @@
mCurrentSlicedBucket = std::make_shared<DimToGaugeAtomsMap>();
mCurrentBucketStartTimeNs = nextBucketStartTimeNs;
mCurrentSkippedBucket.reset();
+ // Reset mHasHitGuardrail boolean since bucket was reset
+ mHasHitGuardrail = false;
}
size_t GaugeMetricProducer::byteSizeLocked() const {
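With the refactor above, the condition and active-state checks are enforced by the callers of pullAndMatchEventsLocked(), so the switch on mSamplingType only decides from the sampling mode and the current bucket contents. A reduced sketch of that decision, with GaugeMetric's sampling types mimicked by a local enum:

    enum class SamplingType { RANDOM_ONE_SAMPLE, CONDITION_CHANGE_TO_TRUE, FIRST_N_SAMPLES, OTHER };

    // Caller guarantees condition == true and metric active, per the new comment in the diff.
    bool shouldTriggerPull(SamplingType type, bool currentBucketEmpty) {
        switch (type) {
            case SamplingType::RANDOM_ONE_SAMPLE:
                // One sample per bucket: skip if a gauge atom was already captured.
                return currentBucketEmpty;
            case SamplingType::CONDITION_CHANGE_TO_TRUE:
            case SamplingType::FIRST_N_SAMPLES:
                return true;
            default:
                return false;
        }
    }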
diff --git a/statsd/src/metrics/GaugeMetricProducer.h b/statsd/src/metrics/GaugeMetricProducer.h
index 7e1b0c2..4db05c7 100644
--- a/statsd/src/metrics/GaugeMetricProducer.h
+++ b/statsd/src/metrics/GaugeMetricProducer.h
@@ -76,13 +76,19 @@
virtual ~GaugeMetricProducer();
// Handles when the pulled data arrives.
- void onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& data,
- bool pullSuccess, int64_t originalPullTimeNs) override;
+ void onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& data, PullResult pullResult,
+ int64_t originalPullTimeNs) override;
+
+ // Determine if metric needs to pull
+ bool isPullNeeded() const override {
+ std::lock_guard<std::mutex> lock(mMutex);
+ return mIsActive && (mCondition == ConditionState::kTrue);
+ };
// GaugeMetric needs to immediately trigger another pull when we create the partial bucket.
void notifyAppUpgradeInternalLocked(const int64_t eventTimeNs) override {
flushLocked(eventTimeNs);
- if (mIsPulled && mSamplingType == GaugeMetric::RANDOM_ONE_SAMPLE) {
+ if (mIsPulled && mSamplingType == GaugeMetric::RANDOM_ONE_SAMPLE && mIsActive) {
pullAndMatchEventsLocked(eventTimeNs);
}
};
@@ -92,7 +98,7 @@
std::lock_guard<std::mutex> lock(mMutex);
flushLocked(eventTimeNs);
- if (mIsPulled && mSamplingType == GaugeMetric::RANDOM_ONE_SAMPLE) {
+ if (mIsPulled && mSamplingType == GaugeMetric::RANDOM_ONE_SAMPLE && mIsActive) {
pullAndMatchEventsLocked(eventTimeNs);
}
};
@@ -120,7 +126,7 @@
void onConditionChangedLocked(const bool conditionMet, const int64_t eventTime) override;
// Internal interface to handle active state change.
- void onActiveStateChangedLocked(const int64_t eventTimeNs) override;
+ void onActiveStateChangedLocked(const int64_t eventTimeNs, const bool isActive) override;
// Internal interface to handle sliced condition change.
void onSlicedConditionMayChangeLocked(bool overallCondition, const int64_t eventTime) override;
@@ -140,9 +146,10 @@
void prepareFirstBucketLocked() override;
+ // Only call if mCondition == ConditionState::kTrue && metric is active.
void pullAndMatchEventsLocked(const int64_t timestampNs);
- bool onConfigUpdatedLocked(
+ optional<InvalidConfigReason> onConfigUpdatedLocked(
const StatsdConfig& config, const int configIndex, const int metricIndex,
const std::vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
const std::unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
@@ -158,6 +165,11 @@
std::unordered_map<int, std::vector<int>>& deactivationAtomTrackerToMetricMap,
std::vector<int>& metricsWithActivation) override;
+ inline bool isRandomNSamples() const {
+ return (mTriggerAtomId == -1 && mSamplingType == GaugeMetric::FIRST_N_SAMPLES) ||
+ mSamplingType == GaugeMetric::RANDOM_ONE_SAMPLE;
+ }
+
int mWhatMatcherIndex;
sp<EventMatcherWizard> mEventMatcherWizard;
@@ -219,7 +231,9 @@
FRIEND_TEST(GaugeMetricProducerTest, TestPulledEventsAnomalyDetection);
FRIEND_TEST(GaugeMetricProducerTest, TestFirstBucket);
FRIEND_TEST(GaugeMetricProducerTest, TestPullOnTrigger);
+ FRIEND_TEST(GaugeMetricProducerTest, TestPullNWithoutTrigger);
FRIEND_TEST(GaugeMetricProducerTest, TestRemoveDimensionInOutput);
+ FRIEND_TEST(GaugeMetricProducerTest, TestPullDimensionalSampling);
FRIEND_TEST(GaugeMetricProducerTest_PartialBucket, TestPushedEvents);
FRIEND_TEST(GaugeMetricProducerTest_PartialBucket, TestPulled);
diff --git a/statsd/src/metrics/KllMetricProducer.cpp b/statsd/src/metrics/KllMetricProducer.cpp
index e5658a9..91f22f4 100644
--- a/statsd/src/metrics/KllMetricProducer.cpp
+++ b/statsd/src/metrics/KllMetricProducer.cpp
@@ -76,7 +76,7 @@
}
void KllMetricProducer::writePastBucketAggregateToProto(
- const int aggIndex, const unique_ptr<KllQuantile>& kll,
+ const int aggIndex, const unique_ptr<KllQuantile>& kll, const int sampleSize,
ProtoOutputStream* const protoOutput) const {
uint64_t sketchesToken =
protoOutput->start(FIELD_TYPE_MESSAGE | FIELD_COUNT_REPEATED | FIELD_ID_SKETCHES);
diff --git a/statsd/src/metrics/KllMetricProducer.h b/statsd/src/metrics/KllMetricProducer.h
index b1c59f2..c142113 100644
--- a/statsd/src/metrics/KllMetricProducer.h
+++ b/statsd/src/metrics/KllMetricProducer.h
@@ -99,6 +99,7 @@
void writePastBucketAggregateToProto(const int aggIndex,
const std::unique_ptr<KllQuantile>& kll,
+ const int sampleSize,
ProtoOutputStream* const protoOutput) const override;
bool aggregateFields(const int64_t eventTimeNs, const MetricDimensionKey& eventKey,
diff --git a/statsd/src/metrics/MetricProducer.cpp b/statsd/src/metrics/MetricProducer.cpp
index 17aa312..bd69798 100644
--- a/statsd/src/metrics/MetricProducer.cpp
+++ b/statsd/src/metrics/MetricProducer.cpp
@@ -62,6 +62,8 @@
mCurrentBucketStartTimeNs(timeBaseNs),
mCurrentBucketNum(0),
mCondition(initialCondition(conditionIndex, initialConditionCache)),
+ // For metrics with pulled events, the condition timer will be set later within the constructor
+ mConditionTimer(false, timeBaseNs),
mConditionTrackerIndex(conditionIndex),
mConditionSliced(false),
mWizard(wizard),
@@ -73,10 +75,13 @@
mIsActive(mEventActivationMap.empty()),
mSlicedStateAtoms(slicedStateAtoms),
mStateGroupMap(stateGroupMap),
- mSplitBucketForAppUpgrade(splitBucketForAppUpgrade) {
+ mSplitBucketForAppUpgrade(splitBucketForAppUpgrade),
+ mHasHitGuardrail(false),
+ mSampledWhatFields({}),
+ mShardCount(0) {
}
-bool MetricProducer::onConfigUpdatedLocked(
+optional<InvalidConfigReason> MetricProducer::onConfigUpdatedLocked(
const StatsdConfig& config, const int configIndex, const int metricIndex,
const vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
const unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
@@ -95,17 +100,18 @@
unordered_map<int, shared_ptr<Activation>> newEventActivationMap;
unordered_map<int, vector<shared_ptr<Activation>>> newEventDeactivationMap;
- if (!handleMetricActivationOnConfigUpdate(
- config, mMetricId, metricIndex, metricToActivationMap, oldAtomMatchingTrackerMap,
- newAtomMatchingTrackerMap, mEventActivationMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, metricsWithActivation, newEventActivationMap,
- newEventDeactivationMap)) {
- return false;
+ optional<InvalidConfigReason> invalidConfigReason = handleMetricActivationOnConfigUpdate(
+ config, mMetricId, metricIndex, metricToActivationMap, oldAtomMatchingTrackerMap,
+ newAtomMatchingTrackerMap, mEventActivationMap, activationAtomTrackerToMetricMap,
+ deactivationAtomTrackerToMetricMap, metricsWithActivation, newEventActivationMap,
+ newEventDeactivationMap);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
mEventActivationMap = newEventActivationMap;
mEventDeactivationMap = newEventDeactivationMap;
mAnomalyTrackers.clear();
- return true;
+ return nullopt;
}
void MetricProducer::onMatchedLogEventLocked(const size_t matcherIndex, const LogEvent& event) {
@@ -118,6 +124,10 @@
return;
}
+ if (!passesSampleCheckLocked(event.getValues())) {
+ return;
+ }
+
bool condition;
ConditionKey conditionKey;
if (mConditionSliced) {
@@ -194,9 +204,13 @@
if (!mIsActive) {
return;
}
- mIsActive = evaluateActiveStateLocked(elapsedTimestampNs);
- if (!mIsActive) {
- onActiveStateChangedLocked(elapsedTimestampNs);
+ const bool isActive = evaluateActiveStateLocked(elapsedTimestampNs);
+ if (!isActive) { // Metric went from active to not active.
+ onActiveStateChangedLocked(elapsedTimestampNs, false);
+
+ // Set mIsActive to false after onActiveStateChangedLocked to ensure any pulls that occur
+ // through onActiveStateChangedLocked are processed.
+ mIsActive = false;
}
}
@@ -215,10 +229,12 @@
}
activation->start_ns = elapsedTimestampNs;
activation->state = ActivationState::kActive;
- bool oldActiveState = mIsActive;
- mIsActive = true;
- if (!oldActiveState) { // Metric went from not active to active.
- onActiveStateChangedLocked(elapsedTimestampNs);
+ if (!mIsActive) { // Metric was previously inactive and now is active.
+ // Set mIsActive to true before onActiveStateChangedLocked to ensure any pulls that occur
+ // through onActiveStateChangedLocked are processed.
+ mIsActive = true;
+
+ onActiveStateChangedLocked(elapsedTimestampNs, true);
}
}
@@ -354,6 +370,21 @@
return mCurrentSkippedBucket.dropEvents.size() >= StatsdStats::kMaxLoggedBucketDropEvents;
}
+bool MetricProducer::passesSampleCheckLocked(const vector<FieldValue>& values) const {
+ // Only perform sampling if shard count is correct and there is a sampled what field.
+ if (mShardCount <= 1 || mSampledWhatFields.size() == 0) {
+ return true;
+ }
+ // If filtering fails, don't perform sampling. Event could be a gauge trigger event or stop all
+ // event.
+ FieldValue sampleFieldValue;
+ if (!filterValues(mSampledWhatFields[0], values, &sampleFieldValue)) {
+ return true;
+ }
+ return shouldKeepSample(sampleFieldValue, ShardOffsetProvider::getInstance().getShardOffset(),
+ mShardCount);
+}
+
} // namespace statsd
} // namespace os
} // namespace android
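passesSampleCheckLocked() above extracts the single sampled "what" field and defers to shouldKeepSample() with the device's shard offset and the configured shard count. The hashing details are not shown in this diff; the sketch below is one plausible shard check and is an assumption, not the statsd implementation:

    #include <cstddef>
    #include <functional>
    #include <string>

    // Illustrative only: keep the event when its sampled field value lands in this device's shard.
    bool shouldKeepSampleSketch(const std::string& sampledFieldValue, int shardOffset, int shardCount) {
        if (shardCount <= 1) {
            return true;  // sampling effectively disabled
        }
        const std::size_t h = std::hash<std::string>{}(sampledFieldValue);
        return static_cast<int>((h + static_cast<std::size_t>(shardOffset)) % shardCount) == 0;
    }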
diff --git a/statsd/src/metrics/MetricProducer.h b/statsd/src/metrics/MetricProducer.h
index 95f8dd4..ed3b217 100644
--- a/statsd/src/metrics/MetricProducer.h
+++ b/statsd/src/metrics/MetricProducer.h
@@ -24,13 +24,18 @@
#include "HashableDimensionKey.h"
#include "anomaly/AnomalyTracker.h"
+#include "condition/ConditionTimer.h"
#include "condition/ConditionWizard.h"
#include "config/ConfigKey.h"
+#include "guardrail/StatsdStats.h"
#include "matchers/EventMatcherWizard.h"
#include "matchers/matcher_util.h"
#include "packages/PackageInfoListener.h"
+#include "src/statsd_metadata.pb.h" // MetricMetadata
#include "state/StateListener.h"
#include "state/StateManager.h"
+#include "utils/DbUtils.h"
+#include "utils/ShardOffsetProvider.h"
namespace android {
namespace os {
@@ -132,6 +137,13 @@
}
};
+struct SamplingInfo {
+ // Matchers for sampled fields. Currently only one sampled dimension is supported.
+ std::vector<Matcher> sampledWhatFields;
+
+ int shardCount = 0;
+};
+
template <class T>
optional<bool> getAppUpgradeBucketSplit(const T& metric) {
return metric.has_split_bucket_for_app_upgrade()
@@ -166,7 +178,7 @@
// This metric and all of its dependencies are guaranteed to be preserved across the update.
// This function also updates several maps used by metricsManager.
// This function clears all anomaly trackers. All anomaly trackers need to be added again.
- bool onConfigUpdated(
+ optional<InvalidConfigReason> onConfigUpdated(
const StatsdConfig& config, const int configIndex, const int metricIndex,
const std::vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
const std::unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
@@ -253,7 +265,7 @@
dumpLatency, str_set, protoOutput);
}
- virtual bool onConfigUpdatedLocked(
+ virtual optional<InvalidConfigReason> onConfigUpdatedLocked(
const StatsdConfig& config, const int configIndex, const int metricIndex,
const std::vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
const std::unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
@@ -325,6 +337,21 @@
void writeActiveMetricToProtoOutputStream(
int64_t currentTimeNs, const DumpReportReason reason, ProtoOutputStream* proto);
+ virtual void enforceRestrictedDataTtl(sqlite3* db, const int64_t wallClockNs){};
+
+ virtual bool writeMetricMetadataToProto(metadata::MetricMetadata* metricMetadata) {
+ return false;
+ }
+
+ virtual void loadMetricMetadataFromProto(const metadata::MetricMetadata& metricMetadata){};
+
+ /* Called when the metric is about to be removed from the config. */
+ virtual void onMetricRemove() {
+ }
+
+ virtual void flushRestrictedData() {
+ }
+
// Start: getters/setters
inline int64_t getMetricId() const {
return mMetricId;
@@ -371,6 +398,12 @@
std::lock_guard<std::mutex> lock(mMutex);
mAnomalyTrackers.push_back(anomalyTracker);
}
+
+ void setSamplingInfo(SamplingInfo samplingInfo) {
+ std::lock_guard<std::mutex> lock(mMutex);
+ mSampledWhatFields.swap(samplingInfo.sampledWhatFields);
+ mShardCount = samplingInfo.shardCount;
+ }
// End: getters/setters
protected:
/**
@@ -394,7 +427,7 @@
/**
* Flushes all the data including the current partial bucket.
*/
- virtual void flushLocked(const int64_t& eventTimeNs) {
+ void flushLocked(const int64_t& eventTimeNs) {
flushIfNeededLocked(eventTimeNs);
flushCurrentBucketLocked(eventTimeNs, eventTimeNs);
};
@@ -445,8 +478,8 @@
bool evaluateActiveStateLocked(int64_t elapsedTimestampNs);
- virtual void onActiveStateChangedLocked(const int64_t eventTimeNs) {
- if (!mIsActive) {
+ virtual void onActiveStateChangedLocked(const int64_t eventTimeNs, const bool isActive) {
+ if (!isActive) {
flushLocked(eventTimeNs);
}
}
@@ -485,6 +518,8 @@
// exceeded the maximum number allowed, which is currently capped at 10.
bool maxDropEventsReached() const;
+ bool passesSampleCheckLocked(const vector<FieldValue>& values) const;
+
const int64_t mMetricId;
// Hash of the Metric's proto bytes from StatsdConfig, including any activations.
@@ -511,6 +546,8 @@
ConditionState mCondition;
+ ConditionTimer mConditionTimer;
+
int mConditionTrackerIndex;
// TODO(b/185770739): use !mMetric2ConditionLinks.empty()
@@ -563,6 +600,14 @@
// Buckets that were invalidated and had their data dropped.
std::vector<SkippedBucket> mSkippedBuckets;
+ // If hard dimension guardrail is hit, do not spam logcat
+ bool mHasHitGuardrail;
+
+ // Matchers for sampled fields. Currently only one sampled dimension is supported.
+ std::vector<Matcher> mSampledWhatFields;
+
+ int mShardCount;
+
FRIEND_TEST(CountMetricE2eTest, TestSlicedState);
FRIEND_TEST(CountMetricE2eTest, TestSlicedStateWithMap);
FRIEND_TEST(CountMetricE2eTest, TestMultipleSlicedStates);
@@ -600,7 +645,8 @@
FRIEND_TEST(ValueMetricE2eTest, TestInitWithSlicedState_WithIncorrectDimensions);
FRIEND_TEST(ValueMetricE2eTest, TestInitialConditionChanges);
- FRIEND_TEST(MetricsManagerTest, TestInitialConditions);
+ FRIEND_TEST(MetricsManagerUtilTest, TestInitialConditions);
+ FRIEND_TEST(MetricsManagerUtilTest, TestSampledMetrics);
FRIEND_TEST(ConfigUpdateTest, TestUpdateMetricActivations);
FRIEND_TEST(ConfigUpdateTest, TestUpdateCountMetrics);
diff --git a/statsd/src/metrics/MetricsManager.cpp b/statsd/src/metrics/MetricsManager.cpp
index 4e577e1..1822ddf 100644
--- a/statsd/src/metrics/MetricsManager.cpp
+++ b/statsd/src/metrics/MetricsManager.cpp
@@ -18,11 +18,13 @@
#include "MetricsManager.h"
+#include <android-modules-utils/sdk_level.h>
#include <private/android_filesystem_config.h>
#include "CountMetricProducer.h"
#include "condition/CombinationConditionTracker.h"
#include "condition/SimpleConditionTracker.h"
+#include "flags/FlagProvider.h"
#include "guardrail/StatsdStats.h"
#include "matchers/CombinationAtomMatchingTracker.h"
#include "matchers/SimpleAtomMatchingTracker.h"
@@ -32,6 +34,9 @@
#include "stats_log_util.h"
#include "stats_util.h"
#include "statslog_statsd.h"
+#include "utils/DbUtils.h"
+
+using android::modules::sdklevel::IsAtLeastU;
using android::util::FIELD_COUNT_REPEATED;
using android::util::FIELD_TYPE_INT32;
@@ -76,17 +81,24 @@
mWhitelistedAtomIds(config.whitelisted_atom_ids().begin(),
config.whitelisted_atom_ids().end()),
mShouldPersistHistory(config.persist_locally()) {
+ if (!IsAtLeastU() && config.has_restricted_metrics_delegate_package_name()) {
+ mInvalidConfigReason =
+ InvalidConfigReason(INVALID_CONFIG_REASON_RESTRICTED_METRIC_NOT_ENABLED);
+ return;
+ }
+ if (config.has_restricted_metrics_delegate_package_name()) {
+ mRestrictedMetricsDelegatePackageName = config.restricted_metrics_delegate_package_name();
+ }
// Init the ttl end timestamp.
refreshTtl(timeBaseNs);
-
- mConfigValid = initStatsdConfig(
+ mInvalidConfigReason = initStatsdConfig(
key, config, uidMap, pullerManager, anomalyAlarmMonitor, periodicAlarmMonitor,
- timeBaseNs, currentTimeNs, mTagIds, mAllAtomMatchingTrackers, mAtomMatchingTrackerMap,
- mAllConditionTrackers, mConditionTrackerMap, mAllMetricProducers, mMetricProducerMap,
- mAllAnomalyTrackers, mAllPeriodicAlarmTrackers, mConditionToMetricMap,
- mTrackerToMetricMap, mTrackerToConditionMap, mActivationAtomTrackerToMetricMap,
- mDeactivationAtomTrackerToMetricMap, mAlertTrackerMap, mMetricIndexesWithActivation,
- mStateProtoHashes, mNoReportMetricIds);
+ timeBaseNs, currentTimeNs, mTagIdsToMatchersMap, mAllAtomMatchingTrackers,
+ mAtomMatchingTrackerMap, mAllConditionTrackers, mConditionTrackerMap,
+ mAllMetricProducers, mMetricProducerMap, mAllAnomalyTrackers, mAllPeriodicAlarmTrackers,
+ mConditionToMetricMap, mTrackerToMetricMap, mTrackerToConditionMap,
+ mActivationAtomTrackerToMetricMap, mDeactivationAtomTrackerToMetricMap,
+ mAlertTrackerMap, mMetricIndexesWithActivation, mStateProtoHashes, mNoReportMetricIds);
mHashStringsInReport = config.hash_strings_in_metric_report();
mVersionStringsInReport = config.version_strings_in_metric_report();
@@ -118,6 +130,16 @@
const int64_t currentTimeNs,
const sp<AlarmMonitor>& anomalyAlarmMonitor,
const sp<AlarmMonitor>& periodicAlarmMonitor) {
+ if (!IsAtLeastU() && config.has_restricted_metrics_delegate_package_name()) {
+ mInvalidConfigReason =
+ InvalidConfigReason(INVALID_CONFIG_REASON_RESTRICTED_METRIC_NOT_ENABLED);
+ return false;
+ }
+ if (config.has_restricted_metrics_delegate_package_name()) {
+ mRestrictedMetricsDelegatePackageName = config.restricted_metrics_delegate_package_name();
+ } else {
+ mRestrictedMetricsDelegatePackageName = nullopt;
+ }
vector<sp<AtomMatchingTracker>> newAtomMatchingTrackers;
unordered_map<int64_t, int> newAtomMatchingTrackerMap;
vector<sp<ConditionTracker>> newConditionTrackers;
@@ -128,7 +150,7 @@
vector<sp<AnomalyTracker>> newAnomalyTrackers;
unordered_map<int64_t, int> newAlertTrackerMap;
vector<sp<AlarmTracker>> newPeriodicAlarmTrackers;
- mTagIds.clear();
+ mTagIdsToMatchersMap.clear();
mConditionToMetricMap.clear();
mTrackerToMetricMap.clear();
mTrackerToConditionMap.clear();
@@ -136,11 +158,11 @@
mDeactivationAtomTrackerToMetricMap.clear();
mMetricIndexesWithActivation.clear();
mNoReportMetricIds.clear();
- mConfigValid = updateStatsdConfig(
+ mInvalidConfigReason = updateStatsdConfig(
mConfigKey, config, mUidMap, mPullerManager, anomalyAlarmMonitor, periodicAlarmMonitor,
timeBaseNs, currentTimeNs, mAllAtomMatchingTrackers, mAtomMatchingTrackerMap,
mAllConditionTrackers, mConditionTrackerMap, mAllMetricProducers, mMetricProducerMap,
- mAllAnomalyTrackers, mAlertTrackerMap, mStateProtoHashes, mTagIds,
+ mAllAnomalyTrackers, mAlertTrackerMap, mStateProtoHashes, mTagIdsToMatchersMap,
newAtomMatchingTrackers, newAtomMatchingTrackerMap, newConditionTrackers,
newConditionTrackerMap, newMetricProducers, newMetricProducerMap, newAnomalyTrackers,
newAlertTrackerMap, newPeriodicAlarmTrackers, mConditionToMetricMap,
@@ -185,15 +207,16 @@
verifyGuardrailsAndUpdateStatsdStats();
initializeConfigActiveStatus();
- return mConfigValid;
+ return !mInvalidConfigReason.has_value();
}
void MetricsManager::createAllLogSourcesFromConfig(const StatsdConfig& config) {
// Init allowed pushed atom uids.
if (config.allowed_log_source_size() == 0) {
- mConfigValid = false;
ALOGE("Log source allowlist is empty! This config won't get any data. Suggest adding at "
"least AID_SYSTEM and AID_STATSD to the allowed_log_source field.");
+ mInvalidConfigReason =
+ InvalidConfigReason(INVALID_CONFIG_REASON_LOG_SOURCE_ALLOWLIST_EMPTY);
} else {
for (const auto& source : config.allowed_log_source()) {
auto it = UidMap::sAidToUidMapping.find(source);
@@ -206,7 +229,7 @@
if (mAllowedUid.size() + mAllowedPkg.size() > StatsdStats::kMaxLogSourceCount) {
ALOGE("Too many log sources. This is likely to be an error in the config.");
- mConfigValid = false;
+ mInvalidConfigReason = InvalidConfigReason(INVALID_CONFIG_REASON_TOO_MANY_LOG_SOURCES);
} else {
initAllowedLogSources();
}
@@ -221,7 +244,8 @@
mDefaultPullUids.insert(it->second);
} else {
ALOGE("Default pull atom packages must be in sAidToUidMapping");
- mConfigValid = false;
+ mInvalidConfigReason =
+ InvalidConfigReason(INVALID_CONFIG_REASON_DEFAULT_PULL_PACKAGES_NOT_IN_MAP);
}
}
// Init per-atom pull atom packages.
@@ -240,7 +264,8 @@
if (numPullPackages > StatsdStats::kMaxPullAtomPackages) {
ALOGE("Too many sources in default_pull_packages and pull_atom_packages. This is likely to "
"be an error in the config");
- mConfigValid = false;
+ mInvalidConfigReason =
+ InvalidConfigReason(INVALID_CONFIG_REASON_TOO_MANY_SOURCES_IN_PULL_PACKAGES);
} else {
initPullAtomSources();
}
@@ -248,21 +273,27 @@
void MetricsManager::verifyGuardrailsAndUpdateStatsdStats() {
// Guardrail. Reject the config if it's too big.
- if (mAllMetricProducers.size() > StatsdStats::kMaxMetricCountPerConfig ||
- mAllConditionTrackers.size() > StatsdStats::kMaxConditionCountPerConfig ||
- mAllAtomMatchingTrackers.size() > StatsdStats::kMaxMatcherCountPerConfig) {
- ALOGE("This config is too big! Reject!");
- mConfigValid = false;
+ if (mAllMetricProducers.size() > StatsdStats::kMaxMetricCountPerConfig) {
+ ALOGE("This config has too many metrics! Reject!");
+ mInvalidConfigReason = InvalidConfigReason(INVALID_CONFIG_REASON_TOO_MANY_METRICS);
+ }
+ if (mAllConditionTrackers.size() > StatsdStats::kMaxConditionCountPerConfig) {
+ ALOGE("This config has too many predicates! Reject!");
+ mInvalidConfigReason = InvalidConfigReason(INVALID_CONFIG_REASON_TOO_MANY_CONDITIONS);
+ }
+ if (mAllAtomMatchingTrackers.size() > StatsdStats::kMaxMatcherCountPerConfig) {
+ ALOGE("This config has too many matchers! Reject!");
+ mInvalidConfigReason = InvalidConfigReason(INVALID_CONFIG_REASON_TOO_MANY_MATCHERS);
}
if (mAllAnomalyTrackers.size() > StatsdStats::kMaxAlertCountPerConfig) {
ALOGE("This config has too many alerts! Reject!");
- mConfigValid = false;
+ mInvalidConfigReason = InvalidConfigReason(INVALID_CONFIG_REASON_TOO_MANY_ALERTS);
}
// no matter whether this config is valid, log it in the stats.
StatsdStats::getInstance().noteConfigReceived(
mConfigKey, mAllMetricProducers.size(), mAllConditionTrackers.size(),
mAllAtomMatchingTrackers.size(), mAllAnomalyTrackers.size(), mAnnotations,
- mConfigValid);
+ mInvalidConfigReason);
}
void MetricsManager::initializeConfigActiveStatus() {
@@ -306,7 +337,7 @@
}
bool MetricsManager::isConfigValid() const {
- return mConfigValid;
+ return !mInvalidConfigReason.has_value();
}
void MetricsManager::notifyAppUpgrade(const int64_t& eventTimeNs, const string& apk, const int uid,
@@ -411,6 +442,11 @@
const bool include_current_partial_bucket, const bool erase_data,
const DumpLatency dumpLatency, std::set<string>* str_set,
ProtoOutputStream* protoOutput) {
+ if (hasRestrictedMetricsDelegate()) {
+ // TODO(b/268150038): report error to statsdstats
+ VLOG("Unexpected call to onDumpReport in restricted metricsmanager.");
+ return;
+ }
VLOG("=========================Metric Reports Start==========================");
// one StatsLogReport per MetricProducer
for (const auto& producer : mAllMetricProducers) {
@@ -523,7 +559,7 @@
// Consume the stats log if it's interesting to this metric.
void MetricsManager::onLogEvent(const LogEvent& event) {
- if (!mConfigValid) {
+ if (!isConfigValid()) {
return;
}
@@ -531,12 +567,13 @@
return;
}
+ // TODO(b/212755214): this check could be done once on the StatsLogProcessor level
if (!eventSanityCheck(event)) {
return;
}
- int tagId = event.GetTagId();
- int64_t eventTimeNs = event.GetElapsedTimestampNs();
+ const int tagId = event.GetTagId();
+ const int64_t eventTimeNs = event.GetElapsedTimestampNs();
bool isActive = mIsAlwaysActive;
@@ -556,17 +593,28 @@
mIsActive = isActive || !activeMetricsIndices.empty();
- if (mTagIds.find(tagId) == mTagIds.end()) {
+ const auto matchersIt = mTagIdsToMatchersMap.find(tagId);
+
+ if (matchersIt == mTagIdsToMatchersMap.end()) {
// Not interesting...
return;
}
+ if (event.isParsedHeaderOnly()) {
+ // This should not happen if a metric config is defined for this atom id
+ const int64_t firstMatcherId =
+ mAllAtomMatchingTrackers[*matchersIt->second.begin()]->getId();
+ ALOGW("Atom %d is mistakenly skipped - there is a matcher %lld for it", tagId,
+ (long long)firstMatcherId);
+ return;
+ }
+
vector<MatchingState> matcherCache(mAllAtomMatchingTrackers.size(),
MatchingState::kNotComputed);
- // Evaluate all atom matchers.
- for (auto& matcher : mAllAtomMatchingTrackers) {
- matcher->onLogEvent(event, mAllAtomMatchingTrackers, matcherCache);
+ for (const auto& matcherIndex : matchersIt->second) {
+ mAllAtomMatchingTrackers[matcherIndex]->onLogEvent(event, mAllAtomMatchingTrackers,
+ matcherCache);
}
// Set of metrics that received an activation cancellation.
@@ -654,7 +702,6 @@
}
}
}
-
// For matched AtomMatchers, tell relevant metrics that a matched event has come.
for (size_t i = 0; i < mAllAtomMatchingTrackers.size(); i++) {
if (matcherCache[i] == MatchingState::kMatched) {
@@ -749,6 +796,15 @@
}
metadataWritten |= alertWritten;
}
+
+ for (const auto& metricProducer : mAllMetricProducers) {
+ metadata::MetricMetadata* metricMetadata = statsMetadata->add_metric_metadata();
+ bool metricWritten = metricProducer->writeMetricMetadataToProto(metricMetadata);
+ if (!metricWritten) {
+ statsMetadata->mutable_metric_metadata()->RemoveLast();
+ }
+ metadataWritten |= metricWritten;
+ }
return metadataWritten;
}
@@ -757,7 +813,7 @@
int64_t systemElapsedTimeNs) {
for (const metadata::AlertMetadata& alertMetadata : metadata.alert_metadata()) {
int64_t alertId = alertMetadata.alert_id();
- auto it = mAlertTrackerMap.find(alertId);
+ const auto& it = mAlertTrackerMap.find(alertId);
if (it == mAlertTrackerMap.end()) {
ALOGE("No anomalyTracker found for alertId %lld", (long long) alertId);
continue;
@@ -766,6 +822,67 @@
currentWallClockTimeNs,
systemElapsedTimeNs);
}
+ for (const metadata::MetricMetadata& metricMetadata : metadata.metric_metadata()) {
+ int64_t metricId = metricMetadata.metric_id();
+ const auto& it = mMetricProducerMap.find(metricId);
+ if (it == mMetricProducerMap.end()) {
+ ALOGE("No metricProducer found for metricId %lld", (long long)metricId);
+ continue;
+ }
+ mAllMetricProducers[it->second]->loadMetricMetadataFromProto(metricMetadata);
+ }
+}
+
+void MetricsManager::enforceRestrictedDataTtls(const int64_t wallClockNs) {
+ if (!hasRestrictedMetricsDelegate()) {
+ return;
+ }
+ sqlite3* db = dbutils::getDb(mConfigKey);
+ if (db == nullptr) {
+ ALOGE("Failed to open sqlite db");
+ dbutils::closeDb(db);
+ return;
+ }
+ for (const auto& producer : mAllMetricProducers) {
+ producer->enforceRestrictedDataTtl(db, wallClockNs);
+ }
+ dbutils::closeDb(db);
+}
+
+bool MetricsManager::validateRestrictedMetricsDelegate(const int32_t callingUid) {
+ if (!hasRestrictedMetricsDelegate()) {
+ return false;
+ }
+
+ set<int32_t> possibleUids = mUidMap->getAppUid(mRestrictedMetricsDelegatePackageName.value());
+
+ return possibleUids.find(callingUid) != possibleUids.end();
+}
+
+void MetricsManager::flushRestrictedData() {
+ if (!hasRestrictedMetricsDelegate()) {
+ return;
+ }
+ int64_t flushStartNs = getElapsedRealtimeNs();
+ for (const auto& producer : mAllMetricProducers) {
+ producer->flushRestrictedData();
+ }
+ StatsdStats::getInstance().noteRestrictedConfigFlushLatency(
+ mConfigKey, getElapsedRealtimeNs() - flushStartNs);
+}
+
+vector<int64_t> MetricsManager::getAllMetricIds() const {
+ vector<int64_t> metricIds;
+ metricIds.reserve(mMetricProducerMap.size());
+ for (const auto& [metricId, _] : mMetricProducerMap) {
+ metricIds.push_back(metricId);
+ }
+ return metricIds;
+}
+
+void MetricsManager::addAllAtomIds(LogEventFilter::AtomIdSet& allIds) const {
+ for (const auto& [atomId, _] : mTagIdsToMatchersMap) {
+ allIds.insert(atomId);
+ }
}
} // namespace statsd
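The onLogEvent() changes above replace the flat mTagIds set with mTagIdsToMatchersMap, so an incoming atom only triggers the matchers registered for its tag instead of every matcher in the config. A stripped-down sketch of that lookup (the map name is taken from the diff; everything else is simplified):

    #include <cstdio>
    #include <unordered_map>
    #include <vector>

    // Each interesting tag id maps to the indices of the matchers that reference it.
    using TagIdsToMatchersMap = std::unordered_map<int, std::vector<int>>;

    void onLogEventSketch(const TagIdsToMatchersMap& tagIdsToMatchersMap, int tagId) {
        const auto matchersIt = tagIdsToMatchersMap.find(tagId);
        if (matchersIt == tagIdsToMatchersMap.end()) {
            return;  // Not interesting to this config.
        }
        for (const int matcherIndex : matchersIt->second) {
            std::printf("evaluate matcher %d for tag %d\n", matcherIndex, tagId);
        }
    }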
diff --git a/statsd/src/metrics/MetricsManager.h b/statsd/src/metrics/MetricsManager.h
index 4600ae1..8b7869a 100644
--- a/statsd/src/metrics/MetricsManager.h
+++ b/statsd/src/metrics/MetricsManager.h
@@ -16,20 +16,21 @@
#pragma once
+#include <unordered_map>
+
#include "anomaly/AlarmMonitor.h"
#include "anomaly/AlarmTracker.h"
#include "anomaly/AnomalyTracker.h"
#include "condition/ConditionTracker.h"
#include "config/ConfigKey.h"
#include "external/StatsPullerManager.h"
-#include "src/statsd_config.pb.h"
-#include "src/statsd_metadata.pb.h"
+#include "guardrail/StatsdStats.h"
#include "logd/LogEvent.h"
#include "matchers/AtomMatchingTracker.h"
#include "metrics/MetricProducer.h"
#include "packages/UidMap.h"
-
-#include <unordered_map>
+#include "src/statsd_config.pb.h"
+#include "src/statsd_metadata.pb.h"
namespace android {
namespace os {
@@ -57,7 +58,7 @@
bool eventSanityCheck(const LogEvent& event);
- void onLogEvent(const LogEvent& event);
+ virtual void onLogEvent(const LogEvent& event);
void onAnomalyAlarmFired(
const int64_t& timestampNs,
@@ -160,6 +161,31 @@
void loadMetadata(const metadata::StatsMetadata& metadata,
int64_t currentWallClockTimeNs,
int64_t systemElapsedTimeNs);
+
+ inline bool hasRestrictedMetricsDelegate() const {
+ return mRestrictedMetricsDelegatePackageName.has_value();
+ }
+
+ inline string getRestrictedMetricsDelegate() const {
+ return hasRestrictedMetricsDelegate() ? mRestrictedMetricsDelegatePackageName.value() : "";
+ }
+
+ inline ConfigKey getConfigKey() const {
+ return mConfigKey;
+ }
+
+ void enforceRestrictedDataTtls(const int64_t wallClockNs);
+
+ bool validateRestrictedMetricsDelegate(const int32_t callingUid);
+
+ virtual void flushRestrictedData();
+
+ // Slow, should not be called in a hotpath.
+ vector<int64_t> getAllMetricIds() const;
+
+ // Adds all atom ids referenced by matchers in the MetricsManager's config
+ void addAllAtomIds(LogEventFilter::AtomIdSet& allIds) const;
+
private:
// For test only.
inline int64_t getTtlEndNs() const { return mTtlEndNs; }
@@ -168,8 +194,6 @@
sp<UidMap> mUidMap;
- bool mConfigValid = false;
-
bool mHashStringsInReport = false;
bool mVersionStringsInReport = false;
bool mInstallerInReport = false;
@@ -181,6 +205,8 @@
int64_t mLastReportTimeNs;
int64_t mLastReportWallClockNs;
+ optional<InvalidConfigReason> mInvalidConfigReason;
+
sp<StatsPullerManager> mPullerManager;
// The uid log sources from StatsdConfig.
@@ -216,8 +242,8 @@
bool mShouldPersistHistory;
- // All event tags that are interesting to my metrics.
- std::set<int> mTagIds;
+ // All event tags that are interesting to config metrics matchers.
+ std::unordered_map<int, std::vector<int>> mTagIdsToMatchersMap;
// We only store the sp of AtomMatchingTracker, MetricProducer, and ConditionTracker in
// MetricsManager. There are relationships between them, and the relationships are denoted by
@@ -256,7 +282,7 @@
// To make the log processing more efficient, we want to do as much filtering as possible
// before we go into individual trackers and conditions to match.
- // 1st filter: check if the event tag id is in mTagIds.
+ // 1st filter: check if the event tag id is in mTagIdsToMatchersMap.
// 2nd filter: if it is, we parse the event because at least one member is interested,
// then pass it to all AtomMatchingTrackers (each of which also filters events by id).
// 3rd filter: for AtomMatchingTrackers that matched this event, we pass this event to the
@@ -292,11 +318,12 @@
void initPullAtomSources();
// Only called on config creation/update to initialize log sources from the config.
- // Calls initAllowedLogSources and initPullAtomSources. Sets mConfigValid to false on error.
+ // Calls initAllowedLogSources and initPullAtomSources. Sets up mInvalidConfigReason on
+ // error.
void createAllLogSourcesFromConfig(const StatsdConfig& config);
// Verifies the config meets guardrails and updates statsdStats.
- // Sets mConfigValid to false on error. Should be called on config creation/update
+ // Sets up mInvalidConfigReason on error. Should be called on config creation/update
void verifyGuardrailsAndUpdateStatsdStats();
// Initializes mIsAlwaysActive and mIsActive.
@@ -315,10 +342,16 @@
// Hashes of the States used in this config, keyed by the state id, used in config updates.
std::map<int64_t, uint64_t> mStateProtoHashes;
+ // Optional package name of the delegate that processes restricted metrics
+ // If set, restricted metrics are only uploaded to the delegate.
+ optional<string> mRestrictedMetricsDelegatePackageName = nullopt;
+
FRIEND_TEST(WakelockDurationE2eTest, TestAggregatedPredicateDimensions);
FRIEND_TEST(MetricConditionLinkE2eTest, TestMultiplePredicatesAndLinks);
FRIEND_TEST(AttributionE2eTest, TestAttributionMatchAndSliceByFirstUid);
FRIEND_TEST(AttributionE2eTest, TestAttributionMatchAndSliceByChain);
+ FRIEND_TEST(GaugeMetricE2ePulledTest, TestFirstNSamplesPulledNoTrigger);
+ FRIEND_TEST(GaugeMetricE2ePulledTest, TestFirstNSamplesPulledNoTriggerWithActivation);
FRIEND_TEST(GaugeMetricE2ePushedTest, TestMultipleFieldsForPushedEvent);
FRIEND_TEST(GaugeMetricE2ePulledTest, TestRandomSamplePulledEvents);
FRIEND_TEST(GaugeMetricE2ePulledTest, TestRandomSamplePulledEvent_LateAlarm);
@@ -326,15 +359,16 @@
FRIEND_TEST(GaugeMetricE2ePulledTest, TestRandomSamplePulledEventsNoCondition);
FRIEND_TEST(GaugeMetricE2ePulledTest, TestConditionChangeToTrueSamplePulledEvents);
- FRIEND_TEST(AnomalyDetectionE2eTest, TestSlicedCountMetric_single_bucket);
- FRIEND_TEST(AnomalyDetectionE2eTest, TestSlicedCountMetric_multiple_buckets);
- FRIEND_TEST(AnomalyDetectionE2eTest, TestCountMetric_save_refractory_to_disk_no_data_written);
- FRIEND_TEST(AnomalyDetectionE2eTest, TestCountMetric_save_refractory_to_disk);
- FRIEND_TEST(AnomalyDetectionE2eTest, TestCountMetric_load_refractory_from_disk);
- FRIEND_TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_single_bucket);
- FRIEND_TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_partial_bucket);
- FRIEND_TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_multiple_buckets);
- FRIEND_TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_long_refractory_period);
+ FRIEND_TEST(AnomalyCountDetectionE2eTest, TestSlicedCountMetric_single_bucket);
+ FRIEND_TEST(AnomalyCountDetectionE2eTest, TestSlicedCountMetric_multiple_buckets);
+ FRIEND_TEST(AnomalyCountDetectionE2eTest,
+ TestCountMetric_save_refractory_to_disk_no_data_written);
+ FRIEND_TEST(AnomalyCountDetectionE2eTest, TestCountMetric_save_refractory_to_disk);
+ FRIEND_TEST(AnomalyCountDetectionE2eTest, TestCountMetric_load_refractory_from_disk);
+ FRIEND_TEST(AnomalyDurationDetectionE2eTest, TestDurationMetric_SUM_single_bucket);
+ FRIEND_TEST(AnomalyDurationDetectionE2eTest, TestDurationMetric_SUM_partial_bucket);
+ FRIEND_TEST(AnomalyDurationDetectionE2eTest, TestDurationMetric_SUM_multiple_buckets);
+ FRIEND_TEST(AnomalyDurationDetectionE2eTest, TestDurationMetric_SUM_long_refractory_period);
FRIEND_TEST(AlarmE2eTest, TestMultipleAlarms);
FRIEND_TEST(ConfigTtlE2eTest, TestCountMetric);
@@ -347,6 +381,10 @@
FRIEND_TEST(MetricsManagerTest, TestLogSources);
FRIEND_TEST(MetricsManagerTest, TestLogSourcesOnConfigUpdate);
+ FRIEND_TEST(MetricsManagerTest, TestOnMetricRemoveCalled);
+ FRIEND_TEST(MetricsManagerTest_SPlus, TestRestrictedMetricsConfig);
+ FRIEND_TEST(MetricsManagerTest_SPlus, TestRestrictedMetricsConfigUpdate);
+ FRIEND_TEST(MetricsManagerUtilTest, TestSampledMetrics);
FRIEND_TEST(StatsLogProcessorTest, TestActiveConfigMetricDiskWriteRead);
FRIEND_TEST(StatsLogProcessorTest, TestActivationOnBoot);
@@ -381,6 +419,7 @@
FRIEND_TEST(ValueMetricE2eTest, TestInitWithSlicedState);
FRIEND_TEST(ValueMetricE2eTest, TestInitWithSlicedState_WithDimensions);
FRIEND_TEST(ValueMetricE2eTest, TestInitWithSlicedState_WithIncorrectDimensions);
+ FRIEND_TEST(GaugeMetricE2ePushedTest, TestDimensionalSampling);
};
} // namespace statsd
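
To illustrate the switch from std::set<int> mTagIds to the std::unordered_map<int, std::vector<int>> mTagIdsToMatchersMap above, here is a minimal standalone sketch of the first-pass filter under simplified assumptions; everything outside the map lookup (the dispatch step, the tracker and metric plumbing) is a hypothetical stand-in, not the real MetricsManager code.

#include <iostream>
#include <unordered_map>
#include <vector>

// Hypothetical stand-in for the per-config filter built at config creation time:
// tag (atom) id -> indices of the matchers interested in that atom.
using TagIdsToMatchersMap = std::unordered_map<int, std::vector<int>>;

// First filter of onLogEvent(): one hash lookup decides whether the event is
// interesting at all and, if so, which matchers to run, instead of walking
// every matcher as a plain std::set<int> of tag ids would require.
void onLogEventSketch(const TagIdsToMatchersMap& tagIdsToMatchers, int eventTagId) {
    const auto it = tagIdsToMatchers.find(eventTagId);
    if (it == tagIdsToMatchers.end()) {
        return;  // No matcher cares about this atom; drop it cheaply.
    }
    for (const int matcherIndex : it->second) {
        // 2nd/3rd filter would go here: run only the interested
        // AtomMatchingTrackers, then the metrics attached to them.
        std::cout << "dispatch event " << eventTagId << " to matcher " << matcherIndex << "\n";
    }
}

int main() {
    TagIdsToMatchersMap map;
    map[10].push_back(0);       // atom 10 feeds matcher 0
    map[10].push_back(3);       // ... and matcher 3
    onLogEventSketch(map, 10);  // dispatched to matchers 0 and 3
    onLogEventSketch(map, 42);  // filtered out, no matcher registered
}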
diff --git a/statsd/src/metrics/NumericValueMetricProducer.cpp b/statsd/src/metrics/NumericValueMetricProducer.cpp
index 703b07b..12c8c04 100644
--- a/statsd/src/metrics/NumericValueMetricProducer.cpp
+++ b/statsd/src/metrics/NumericValueMetricProducer.cpp
@@ -50,6 +50,7 @@
const int FIELD_ID_VALUE_INDEX = 1;
const int FIELD_ID_VALUE_LONG = 2;
const int FIELD_ID_VALUE_DOUBLE = 3;
+const int FIELD_ID_VALUE_SAMPLESIZE = 4;
const int FIELD_ID_VALUES = 9;
const int FIELD_ID_BUCKET_NUM = 4;
const int FIELD_ID_START_BUCKET_ELAPSED_MILLIS = 5;
@@ -71,6 +72,9 @@
conditionOptions, stateOptions, activationOptions, guardrailOptions),
mUseAbsoluteValueOnReset(metric.use_absolute_value_on_reset()),
mAggregationType(metric.aggregation_type()),
+ mIncludeSampleSize(metric.has_include_sample_size()
+ ? metric.include_sample_size()
+ : metric.aggregation_type() == ValueMetric_AggregationType_AVG),
mUseDiff(metric.has_use_diff() ? metric.use_diff() : isPulled()),
mValueDirection(metric.value_direction()),
mSkipZeroDiffOutput(metric.skip_zero_diff_output()),
@@ -112,10 +116,14 @@
}
void NumericValueMetricProducer::writePastBucketAggregateToProto(
- const int aggIndex, const Value& value, ProtoOutputStream* const protoOutput) const {
+ const int aggIndex, const Value& value, const int sampleSize,
+ ProtoOutputStream* const protoOutput) const {
uint64_t valueToken =
protoOutput->start(FIELD_TYPE_MESSAGE | FIELD_COUNT_REPEATED | FIELD_ID_VALUES);
protoOutput->write(FIELD_TYPE_INT32 | FIELD_ID_VALUE_INDEX, aggIndex);
+ if (mIncludeSampleSize) {
+ protoOutput->write(FIELD_TYPE_INT32 | FIELD_ID_VALUE_SAMPLESIZE, sampleSize);
+ }
if (value.getType() == LONG) {
protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_VALUE_LONG, (long long)value.long_value);
VLOG("\t\t value %d: %lld", aggIndex, (long long)value.long_value);
@@ -128,10 +136,11 @@
protoOutput->end(valueToken);
}
-void NumericValueMetricProducer::onActiveStateChangedInternalLocked(const int64_t eventTimeNs) {
+void NumericValueMetricProducer::onActiveStateChangedInternalLocked(const int64_t eventTimeNs,
+ const bool isActive) {
// When active state changes from true to false for pulled metric, clear diff base but don't
// reset other counters as we may accumulate more value in the bucket.
- if (mUseDiff && !mIsActive) {
+ if (mUseDiff && !isActive) {
resetBase();
}
}
@@ -174,14 +183,14 @@
// By design, statsd pulls data at bucket boundaries using AlarmManager. These pulls are likely
// to be delayed. Other events like condition changes or app upgrade which are not based on
// AlarmManager might have arrived earlier and close the bucket.
-void NumericValueMetricProducer::onDataPulled(const vector<shared_ptr<LogEvent>>& allData,
- bool pullSuccess, int64_t originalPullTimeNs) {
+void NumericValueMetricProducer::onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& allData,
+ PullResult pullResult, int64_t originalPullTimeNs) {
lock_guard<mutex> lock(mMutex);
if (mCondition == ConditionState::kTrue) {
// If the pull failed, we won't be able to compute a diff.
- if (!pullSuccess) {
+ if (pullResult == PullResult::PULL_RESULT_FAIL) {
invalidateCurrentBucket(originalPullTimeNs, BucketDropReason::PULL_FAILED);
- } else {
+ } else if (pullResult == PullResult::PULL_RESULT_SUCCESS) {
bool isEventLate = originalPullTimeNs < getCurrentBucketEndTimeNs();
if (isEventLate) {
// If the event is late, we are in the middle of a bucket. Just
@@ -332,8 +341,7 @@
}
}
-bool NumericValueMetricProducer::hitFullBucketGuardRailLocked(
- const MetricDimensionKey& newKey) const {
+bool NumericValueMetricProducer::hitFullBucketGuardRailLocked(const MetricDimensionKey& newKey) {
// ===========GuardRail==============
// 1. Report the tuple count if the tuple count > soft limit
if (mCurrentFullBucket.find(newKey) != mCurrentFullBucket.end()) {
@@ -343,8 +351,11 @@
size_t newTupleCount = mCurrentFullBucket.size() + 1;
// 2. Don't add more tuples, we are above the allowed threshold. Drop the data.
if (newTupleCount > mDimensionHardLimit) {
- ALOGE("ValueMetric %lld dropping data for full bucket dimension key %s",
- (long long)mMetricId, newKey.toString().c_str());
+ if (!mHasHitGuardrail) {
+ ALOGE("ValueMetric %lld dropping data for full bucket dimension key %s",
+ (long long)mMetricId, newKey.toString().c_str());
+ mHasHitGuardrail = true;
+ }
return true;
}
}
@@ -524,6 +535,9 @@
bucket.aggIndex.push_back(interval.aggIndex);
bucket.aggregates.push_back(getFinalValue(interval));
+ if (mIncludeSampleSize) {
+ bucket.sampleSizes.push_back(interval.sampleSize);
+ }
}
return bucket;
}
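
Since include_sample_size now defaults to true when the aggregation type is AVG, each reported aggregate can carry the number of samples behind it. Below is a small sketch of that bookkeeping under simplified assumptions; AvgInterval and the printf are placeholders for the real interval struct and writePastBucketAggregateToProto(), not the actual statsd types.

#include <cstdio>

// Hypothetical per-dimension accumulator: a running sum plus the number of
// samples, so an AVG aggregate can be reported together with its sample size.
struct AvgInterval {
    double sum = 0;
    int sampleSize = 0;

    void addSample(double v) {
        sum += v;
        ++sampleSize;
    }
};

int main() {
    AvgInterval interval;
    interval.addSample(10.0);
    interval.addSample(30.0);

    const bool includeSampleSize = true;  // what AVG defaults to per the change above
    const double avg = interval.sampleSize > 0 ? interval.sum / interval.sampleSize : 0;
    // Stand-in for writePastBucketAggregateToProto(): the value plus, optionally, sample size.
    std::printf("value=%.1f", avg);
    if (includeSampleSize) {
        std::printf(" sample_size=%d", interval.sampleSize);
    }
    std::printf("\n");  // prints: value=20.0 sample_size=2
}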
diff --git a/statsd/src/metrics/NumericValueMetricProducer.h b/statsd/src/metrics/NumericValueMetricProducer.h
index 8eca3f8..78c414f 100644
--- a/statsd/src/metrics/NumericValueMetricProducer.h
+++ b/statsd/src/metrics/NumericValueMetricProducer.h
@@ -39,9 +39,15 @@
const GuardrailOptions& guardrailOptions);
// Process data pulled on bucket boundary.
- void onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& data, bool pullSuccess,
+ void onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& allData, PullResult pullResult,
int64_t originalPullTimeNs) override;
+ // Determine if metric needs to pull
+ bool isPullNeeded() const override {
+ std::lock_guard<std::mutex> lock(mMutex);
+ return mIsActive && (mCondition == ConditionState::kTrue);
+ }
+
inline MetricType getMetricType() const override {
return METRIC_TYPE_VALUE;
}
@@ -66,7 +72,8 @@
return config.value_metric(configIndex).links();
}
- void onActiveStateChangedInternalLocked(const int64_t eventTimeNs) override;
+ void onActiveStateChangedInternalLocked(const int64_t eventTimeNs,
+ const bool isActive) override;
// Only called when mIsActive and the event is NOT too late.
void onConditionChangedInternalLocked(const ConditionState oldCondition,
@@ -108,7 +115,7 @@
void appendToFullBucket(const bool isFullBucketReached);
- bool hitFullBucketGuardRailLocked(const MetricDimensionKey& newKey) const;
+ bool hitFullBucketGuardRailLocked(const MetricDimensionKey& newKey);
inline bool canSkipLogEventLocked(
const MetricDimensionKey& eventKey, const bool condition, const int64_t eventTimeNs,
@@ -129,6 +136,7 @@
DumpProtoFields getDumpProtoFields() const override;
void writePastBucketAggregateToProto(const int aggIndex, const Value& value,
+ const int sampleSize,
ProtoOutputStream* const protoOutput) const override;
// Internal function to calculate the current used bytes.
@@ -141,6 +149,8 @@
const ValueMetric::AggregationType mAggregationType;
+ const bool mIncludeSampleSize;
+
const bool mUseDiff;
const ValueMetric::ValueDirection mValueDirection;
@@ -203,6 +213,7 @@
TestResetBaseOnPullFailAfterConditionChange_EndOfBucket);
FRIEND_TEST(NumericValueMetricProducerTest, TestResetBaseOnPullFailBeforeConditionChange);
FRIEND_TEST(NumericValueMetricProducerTest, TestResetBaseOnPullTooLate);
+ FRIEND_TEST(NumericValueMetricProducerTest, TestSampleSize);
FRIEND_TEST(NumericValueMetricProducerTest, TestSkipZeroDiffOutput);
FRIEND_TEST(NumericValueMetricProducerTest, TestSkipZeroDiffOutputMultiValue);
FRIEND_TEST(NumericValueMetricProducerTest, TestSlicedState);
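
onDataPulled() now takes a PullResult rather than a bool, so failed and successful pulls are handled as explicit cases. A simplified sketch of that dispatch follows; only the two enum values used in this diff are modeled, and the bucket operations are replaced by prints.

#include <iostream>

// Values taken from the calls in the diff; any other result simply falls through
// the else-if chain, which is the point of moving away from a plain bool.
enum class PullResult { PULL_RESULT_SUCCESS, PULL_RESULT_FAIL };

void onDataPulledSketch(PullResult pullResult) {
    if (pullResult == PullResult::PULL_RESULT_FAIL) {
        // No data to diff against: invalidate the current bucket (PULL_FAILED drop reason).
        std::cout << "invalidate current bucket\n";
    } else if (pullResult == PullResult::PULL_RESULT_SUCCESS) {
        // Accumulate the pulled events into the current bucket.
        std::cout << "accumulate pulled events\n";
    }
    // Any other result leaves the bucket untouched.
}

int main() {
    onDataPulledSketch(PullResult::PULL_RESULT_FAIL);
    onDataPulledSketch(PullResult::PULL_RESULT_SUCCESS);
}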
diff --git a/statsd/src/metrics/RestrictedEventMetricProducer.cpp b/statsd/src/metrics/RestrictedEventMetricProducer.cpp
new file mode 100644
index 0000000..f38e835
--- /dev/null
+++ b/statsd/src/metrics/RestrictedEventMetricProducer.cpp
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2023, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define STATSD_DEBUG true
+#include "Log.h"
+
+#include "RestrictedEventMetricProducer.h"
+
+#include "stats_annotations.h"
+#include "stats_log_util.h"
+#include "utils/DbUtils.h"
+
+using std::lock_guard;
+using std::vector;
+
+namespace android {
+namespace os {
+namespace statsd {
+
+#define NS_PER_DAY (24 * 3600 * NS_PER_SEC)
+
+RestrictedEventMetricProducer::RestrictedEventMetricProducer(
+ const ConfigKey& key, const EventMetric& metric, const int conditionIndex,
+ const vector<ConditionState>& initialConditionCache, const sp<ConditionWizard>& wizard,
+ const uint64_t protoHash, const int64_t startTimeNs,
+ const unordered_map<int, shared_ptr<Activation>>& eventActivationMap,
+ const unordered_map<int, vector<shared_ptr<Activation>>>& eventDeactivationMap,
+ const vector<int>& slicedStateAtoms,
+ const unordered_map<int, unordered_map<int, int64_t>>& stateGroupMap)
+ : EventMetricProducer(key, metric, conditionIndex, initialConditionCache, wizard, protoHash,
+ startTimeNs, eventActivationMap, eventDeactivationMap, slicedStateAtoms,
+ stateGroupMap),
+ mRestrictedDataCategory(CATEGORY_UNKNOWN) {
+}
+
+void RestrictedEventMetricProducer::onMatchedLogEventInternalLocked(
+ const size_t matcherIndex, const MetricDimensionKey& eventKey,
+ const ConditionKey& conditionKey, bool condition, const LogEvent& event,
+ const std::map<int, HashableDimensionKey>& statePrimaryKeys) {
+ if (!condition) {
+ return;
+ }
+ if (mRestrictedDataCategory != CATEGORY_UNKNOWN &&
+ mRestrictedDataCategory != event.getRestrictionCategory()) {
+ StatsdStats::getInstance().noteRestrictedMetricCategoryChanged(mConfigKey, mMetricId);
+ deleteMetricTable();
+ mLogEvents.clear();
+ mTotalSize = 0;
+ }
+ mRestrictedDataCategory = event.getRestrictionCategory();
+ mLogEvents.push_back(event);
+ mTotalSize += getSize(event.getValues()) + sizeof(event);
+}
+
+void RestrictedEventMetricProducer::onDumpReportLocked(
+ const int64_t dumpTimeNs, const bool include_current_partial_bucket, const bool erase_data,
+ const DumpLatency dumpLatency, std::set<string>* str_set,
+ android::util::ProtoOutputStream* protoOutput) {
+ VLOG("Unexpected call to onDumpReportLocked() in RestrictedEventMetricProducer");
+}
+
+void RestrictedEventMetricProducer::onMetricRemove() {
+ std::lock_guard<std::mutex> lock(mMutex);
+ if (!mIsMetricTableCreated) {
+ return;
+ }
+ deleteMetricTable();
+}
+
+void RestrictedEventMetricProducer::enforceRestrictedDataTtl(sqlite3* db,
+ const int64_t wallClockNs) {
+ int32_t ttlInDays = RestrictedPolicyManager::getInstance().getRestrictedCategoryTtl(
+ mRestrictedDataCategory);
+ int64_t ttlTime = wallClockNs - ttlInDays * NS_PER_DAY;
+ dbutils::flushTtl(db, mMetricId, ttlTime);
+}
+
+void RestrictedEventMetricProducer::clearPastBucketsLocked(const int64_t dumpTimeNs) {
+ VLOG("Unexpected call to clearPastBucketsLocked in RestrictedEventMetricProducer");
+}
+
+void RestrictedEventMetricProducer::dropDataLocked(const int64_t dropTimeNs) {
+ mLogEvents.clear();
+ mTotalSize = 0;
+ StatsdStats::getInstance().noteBucketDropped(mMetricId);
+}
+
+void RestrictedEventMetricProducer::flushRestrictedData() {
+ std::lock_guard<std::mutex> lock(mMutex);
+ if (mLogEvents.empty()) {
+ return;
+ }
+ int64_t flushStartNs = getElapsedRealtimeNs();
+ if (!mIsMetricTableCreated) {
+ if (!dbutils::isEventCompatible(mConfigKey, mMetricId, mLogEvents[0])) {
+ // Delete old data if schema changes
+ // TODO(b/268150038): report error to statsdstats
+ ALOGD("Detected schema change for metric %lld", (long long)mMetricId);
+ deleteMetricTable();
+ }
+ // TODO(b/271481944): add retry.
+ if (!dbutils::createTableIfNeeded(mConfigKey, mMetricId, mLogEvents[0])) {
+ ALOGE("Failed to create table for metric %lld", (long long)mMetricId);
+ StatsdStats::getInstance().noteRestrictedMetricTableCreationError(mConfigKey,
+ mMetricId);
+ return;
+ }
+ mIsMetricTableCreated = true;
+ }
+ string err;
+ if (!dbutils::insert(mConfigKey, mMetricId, mLogEvents, err)) {
+ ALOGE("Failed to insert logEvent to table for metric %lld. err=%s", (long long)mMetricId,
+ err.c_str());
+ StatsdStats::getInstance().noteRestrictedMetricInsertError(mConfigKey, mMetricId);
+ } else {
+ StatsdStats::getInstance().noteRestrictedMetricFlushLatency(
+ mConfigKey, mMetricId, getElapsedRealtimeNs() - flushStartNs);
+ }
+ mLogEvents.clear();
+ mTotalSize = 0;
+}
+
+bool RestrictedEventMetricProducer::writeMetricMetadataToProto(
+ metadata::MetricMetadata* metricMetadata) {
+ metricMetadata->set_metric_id(mMetricId);
+ metricMetadata->set_restricted_category(mRestrictedDataCategory);
+ return true;
+}
+
+void RestrictedEventMetricProducer::loadMetricMetadataFromProto(
+ const metadata::MetricMetadata& metricMetadata) {
+ mRestrictedDataCategory =
+ static_cast<StatsdRestrictionCategory>(metricMetadata.restricted_category());
+}
+
+void RestrictedEventMetricProducer::deleteMetricTable() {
+ if (!dbutils::deleteTable(mConfigKey, mMetricId)) {
+ StatsdStats::getInstance().noteRestrictedMetricTableDeletionError(mConfigKey, mMetricId);
+ VLOG("Failed to delete table for metric %lld", (long long)mMetricId);
+ }
+ mIsMetricTableCreated = false;
+}
+
+} // namespace statsd
+} // namespace os
+} // namespace android
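
The TTL enforcement in enforceRestrictedDataTtl() reduces to one cutoff: rows whose wall-clock timestamp is older than now minus the category TTL (in days) get flushed from the metric's table. A tiny standalone sketch of that arithmetic, with a hypothetical TTL value and a print standing in for dbutils::flushTtl():

#include <cstdint>
#include <cstdio>

constexpr int64_t NS_PER_SEC = 1000000000LL;
constexpr int64_t NS_PER_DAY = 24LL * 3600LL * NS_PER_SEC;

// Stand-in for RestrictedPolicyManager::getRestrictedCategoryTtl(): days to keep data.
int32_t getTtlDaysForCategory() {
    return 7;  // hypothetical policy value
}

int main() {
    const int64_t wallClockNs = 1700000000LL * NS_PER_SEC;  // "now", hypothetical
    const int64_t ttlTime = wallClockNs - getTtlDaysForCategory() * NS_PER_DAY;
    // Stand-in for dbutils::flushTtl(db, metricId, ttlTime): delete rows older than ttlTime.
    std::printf("delete rows with timestampNs < %lld\n", (long long)ttlTime);
}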
diff --git a/statsd/src/metrics/RestrictedEventMetricProducer.h b/statsd/src/metrics/RestrictedEventMetricProducer.h
new file mode 100644
index 0000000..74b0b80
--- /dev/null
+++ b/statsd/src/metrics/RestrictedEventMetricProducer.h
@@ -0,0 +1,67 @@
+#ifndef RESTRICTED_EVENT_METRIC_PRODUCER_H
+#define RESTRICTED_EVENT_METRIC_PRODUCER_H
+
+#include <gtest/gtest_prod.h>
+
+#include "EventMetricProducer.h"
+#include "utils/RestrictedPolicyManager.h"
+
+namespace android {
+namespace os {
+namespace statsd {
+
+class RestrictedEventMetricProducer : public EventMetricProducer {
+public:
+ RestrictedEventMetricProducer(
+ const ConfigKey& key, const EventMetric& eventMetric, const int conditionIndex,
+ const vector<ConditionState>& initialConditionCache, const sp<ConditionWizard>& wizard,
+ const uint64_t protoHash, const int64_t startTimeNs,
+ const std::unordered_map<int, std::shared_ptr<Activation>>& eventActivationMap = {},
+ const std::unordered_map<int, std::vector<std::shared_ptr<Activation>>>&
+ eventDeactivationMap = {},
+ const vector<int>& slicedStateAtoms = {},
+ const unordered_map<int, unordered_map<int, int64_t>>& stateGroupMap = {});
+
+ void onMetricRemove() override;
+
+ void enforceRestrictedDataTtl(sqlite3* db, const int64_t wallClockNs);
+
+ void flushRestrictedData() override;
+
+ bool writeMetricMetadataToProto(metadata::MetricMetadata* metricMetadata) override;
+
+ void loadMetricMetadataFromProto(const metadata::MetricMetadata& metricMetadata) override;
+
+ inline StatsdRestrictionCategory getRestrictionCategory() {
+ std::lock_guard<std::mutex> lock(mMutex);
+ return mRestrictedDataCategory;
+ }
+
+private:
+ void onMatchedLogEventInternalLocked(
+ const size_t matcherIndex, const MetricDimensionKey& eventKey,
+ const ConditionKey& conditionKey, bool condition, const LogEvent& event,
+ const std::map<int, HashableDimensionKey>& statePrimaryKeys) override;
+
+ void onDumpReportLocked(const int64_t dumpTimeNs, const bool include_current_partial_bucket,
+ const bool erase_data, const DumpLatency dumpLatency,
+ std::set<string>* str_set,
+ android::util::ProtoOutputStream* protoOutput) override;
+
+ void clearPastBucketsLocked(const int64_t dumpTimeNs) override;
+
+ void dropDataLocked(const int64_t dropTimeNs) override;
+
+ void deleteMetricTable();
+
+ bool mIsMetricTableCreated = false;
+
+ StatsdRestrictionCategory mRestrictedDataCategory;
+
+ vector<LogEvent> mLogEvents;
+};
+
+} // namespace statsd
+} // namespace os
+} // namespace android
+#endif // RESTRICTED_EVENT_METRIC_PRODUCER_H
diff --git a/statsd/src/metrics/ValueMetricProducer.cpp b/statsd/src/metrics/ValueMetricProducer.cpp
index 50958ca..53f78c2 100644
--- a/statsd/src/metrics/ValueMetricProducer.cpp
+++ b/statsd/src/metrics/ValueMetricProducer.cpp
@@ -90,8 +90,6 @@
mDimensionSoftLimit(guardrailOptions.dimensionSoftLimit),
mDimensionHardLimit(guardrailOptions.dimensionHardLimit),
mCurrentBucketIsSkipped(false),
- // Condition timer will be set later within the constructor after pulling events
- mConditionTimer(false, bucketOptions.timeBaseNs),
mConditionCorrectionThresholdNs(bucketOptions.conditionCorrectionThresholdNs) {
// TODO(b/185722221): inject directly via initializer list in MetricProducer.
mBucketSizeNs = bucketOptions.bucketSizeNs;
@@ -158,9 +156,7 @@
const int64_t& eventTimeNs) {
lock_guard<mutex> lock(mMutex);
- // TODO(b/188837487): Add mIsActive check
-
- if (isPulled() && mCondition == ConditionState::kTrue) {
+ if (isPulled() && mCondition == ConditionState::kTrue && mIsActive) {
pullAndMatchEventsLocked(eventTimeNs);
}
flushCurrentBucketLocked(eventTimeNs, eventTimeNs);
@@ -169,15 +165,15 @@
template <typename AggregatedValue, typename DimExtras>
void ValueMetricProducer<AggregatedValue, DimExtras>::notifyAppUpgradeInternalLocked(
const int64_t eventTimeNs) {
- // TODO(b/188837487): Add mIsActive check
- if (isPulled() && mCondition == ConditionState::kTrue) {
+ if (isPulled() && mCondition == ConditionState::kTrue && mIsActive) {
pullAndMatchEventsLocked(eventTimeNs);
}
flushCurrentBucketLocked(eventTimeNs, eventTimeNs);
}
template <typename AggregatedValue, typename DimExtras>
-bool ValueMetricProducer<AggregatedValue, DimExtras>::onConfigUpdatedLocked(
+optional<InvalidConfigReason>
+ValueMetricProducer<AggregatedValue, DimExtras>::onConfigUpdatedLocked(
const StatsdConfig& config, const int configIndex, const int metricIndex,
const vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
const unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
@@ -191,35 +187,37 @@
unordered_map<int, vector<int>>& activationAtomTrackerToMetricMap,
unordered_map<int, vector<int>>& deactivationAtomTrackerToMetricMap,
vector<int>& metricsWithActivation) {
- if (!MetricProducer::onConfigUpdatedLocked(
- config, configIndex, metricIndex, allAtomMatchingTrackers,
- oldAtomMatchingTrackerMap, newAtomMatchingTrackerMap, matcherWizard,
- allConditionTrackers, conditionTrackerMap, wizard, metricToActivationMap,
- trackerToMetricMap, conditionToMetricMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, metricsWithActivation)) {
- return false;
+ optional<InvalidConfigReason> invalidConfigReason = MetricProducer::onConfigUpdatedLocked(
+ config, configIndex, metricIndex, allAtomMatchingTrackers, oldAtomMatchingTrackerMap,
+ newAtomMatchingTrackerMap, matcherWizard, allConditionTrackers, conditionTrackerMap,
+ wizard, metricToActivationMap, trackerToMetricMap, conditionToMetricMap,
+ activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
+ metricsWithActivation);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
-
// Update appropriate indices: mWhatMatcherIndex, mConditionIndex and MetricsManager maps.
const int64_t atomMatcherId = getWhatAtomMatcherIdForMetric(config, configIndex);
- if (!handleMetricWithAtomMatchingTrackers(atomMatcherId, metricIndex, /*enforceOneAtom=*/false,
- allAtomMatchingTrackers, newAtomMatchingTrackerMap,
- trackerToMetricMap, mWhatMatcherIndex)) {
- return false;
+ invalidConfigReason = handleMetricWithAtomMatchingTrackers(
+ atomMatcherId, mMetricId, metricIndex, /*enforceOneAtom=*/false,
+ allAtomMatchingTrackers, newAtomMatchingTrackerMap, trackerToMetricMap,
+ mWhatMatcherIndex);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
-
const optional<int64_t>& conditionIdOpt = getConditionIdForMetric(config, configIndex);
const ConditionLinks& conditionLinks = getConditionLinksForMetric(config, configIndex);
- if (conditionIdOpt.has_value() &&
- !handleMetricWithConditions(conditionIdOpt.value(), metricIndex, conditionTrackerMap,
- conditionLinks, allConditionTrackers, mConditionTrackerIndex,
- conditionToMetricMap)) {
- return false;
+ if (conditionIdOpt.has_value()) {
+ invalidConfigReason = handleMetricWithConditions(
+ conditionIdOpt.value(), mMetricId, metricIndex, conditionTrackerMap, conditionLinks,
+ allConditionTrackers, mConditionTrackerIndex, conditionToMetricMap);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
+ }
}
-
sp<EventMatcherWizard> tmpEventWizard = mEventMatcherWizard;
mEventMatcherWizard = matcherWizard;
- return true;
+ return nullopt;
}
template <typename AggregatedValue, typename DimExtras>
@@ -296,14 +294,17 @@
const DumpLatency dumpLatency, set<string>* strSet, ProtoOutputStream* protoOutput) {
VLOG("metric %lld dump report now...", (long long)mMetricId);
- // TODO(b/188837487): Add mIsActive check
-
+ // Pulled metrics need to pull before flushing, which is why they do not call flushIfNeeded.
+    // TODO(b/249823426): see if we can pull and call flushIfNeeded for pulled value metrics.
+ if (!isPulled()) {
+ flushIfNeededLocked(dumpTimeNs);
+ }
if (includeCurrentPartialBucket) {
// For pull metrics, we need to do a pull at bucket boundaries. If we do not do that the
// current bucket will have incomplete data and the next will have the wrong snapshot to do
// a diff against. If the condition is false, we are fine since the base data is reset and
// we are not tracking anything.
- if (isPulled() && mCondition == ConditionState::kTrue) {
+ if (isPulled() && mCondition == ConditionState::kTrue && mIsActive) {
switch (dumpLatency) {
case FAST:
invalidateCurrentBucket(dumpTimeNs, BucketDropReason::DUMP_REPORT_REQUESTED);
@@ -422,8 +423,9 @@
for (int i = 0; i < (int)bucket.aggIndex.size(); i++) {
VLOG("\t bucket [%lld - %lld]", (long long)bucket.mBucketStartNs,
(long long)bucket.mBucketEndNs);
+ int sampleSize = !bucket.sampleSizes.empty() ? bucket.sampleSizes[i] : 0;
writePastBucketAggregateToProto(bucket.aggIndex[i], bucket.aggregates[i],
- protoOutput);
+ sampleSize, protoOutput);
}
protoOutput->end(bucketInfoToken);
}
@@ -452,19 +454,24 @@
template <typename AggregatedValue, typename DimExtras>
void ValueMetricProducer<AggregatedValue, DimExtras>::skipCurrentBucket(
const int64_t dropTimeNs, const BucketDropReason reason) {
+ if (!mIsActive) {
+ // Don't keep track of skipped buckets if metric is not active.
+ return;
+ }
+
if (!maxDropEventsReached()) {
mCurrentSkippedBucket.dropEvents.push_back(buildDropEvent(dropTimeNs, reason));
}
mCurrentBucketIsSkipped = true;
}
-// Handle active state change. Active state change is treated like a condition change:
+// Handle active state change. Active state change is *mostly* treated like a condition change:
// - drop bucket if active state change event arrives too late
// - if condition is true, pull data on active state changes
// - ConditionTimer tracks changes based on AND of condition and active state.
template <typename AggregatedValue, typename DimExtras>
void ValueMetricProducer<AggregatedValue, DimExtras>::onActiveStateChangedLocked(
- const int64_t eventTimeNs) {
+ const int64_t eventTimeNs, const bool isActive) {
const bool eventLate = isEventLateLocked(eventTimeNs);
if (eventLate) {
// Drop bucket because event arrived too late, ie. we are missing data for this bucket.
@@ -472,10 +479,9 @@
invalidateCurrentBucket(eventTimeNs, BucketDropReason::EVENT_IN_WRONG_BUCKET);
}
- // Call parent method once we've verified the validity of current bucket.
- MetricProducer::onActiveStateChangedLocked(eventTimeNs);
-
if (ConditionState::kTrue != mCondition) {
+ // Call parent method before early return.
+ MetricProducer::onActiveStateChangedLocked(eventTimeNs, isActive);
return;
}
@@ -485,15 +491,17 @@
pullAndMatchEventsLocked(eventTimeNs);
}
- onActiveStateChangedInternalLocked(eventTimeNs);
+ onActiveStateChangedInternalLocked(eventTimeNs, isActive);
}
- flushIfNeededLocked(eventTimeNs);
+ // Once any pulls are processed, call through to parent method which might flush the current
+ // bucket.
+ MetricProducer::onActiveStateChangedLocked(eventTimeNs, isActive);
// Let condition timer know of new active state.
- mConditionTimer.onConditionChanged(mIsActive, eventTimeNs);
+ mConditionTimer.onConditionChanged(isActive, eventTimeNs);
- updateCurrentSlicedBucketConditionTimers(mIsActive, eventTimeNs);
+ updateCurrentSlicedBucketConditionTimers(isActive, eventTimeNs);
}
template <typename AggregatedValue, typename DimExtras>
@@ -608,7 +616,7 @@
template <typename AggregatedValue, typename DimExtras>
bool ValueMetricProducer<AggregatedValue, DimExtras>::hitGuardRailLocked(
- const MetricDimensionKey& newKey) const {
+ const MetricDimensionKey& newKey) {
// ===========GuardRail==============
// 1. Report the tuple count if the tuple count > soft limit
if (mCurrentSlicedBucket.find(newKey) != mCurrentSlicedBucket.end()) {
@@ -619,8 +627,11 @@
StatsdStats::getInstance().noteMetricDimensionSize(mConfigKey, mMetricId, newTupleCount);
// 2. Don't add more tuples, we are above the allowed threshold. Drop the data.
if (hasReachedGuardRailLimit()) {
- ALOGE("ValueMetricProducer %lld dropping data for dimension key %s",
- (long long)mMetricId, newKey.toString().c_str());
+ if (!mHasHitGuardrail) {
+ ALOGE("ValueMetricProducer %lld dropping data for dimension key %s",
+ (long long)mMetricId, newKey.toString().c_str());
+ mHasHitGuardrail = true;
+ }
StatsdStats::getInstance().noteHardDimensionLimitReached(mMetricId);
return true;
}
@@ -873,6 +884,8 @@
mCurrentSkippedBucket.reset();
mCurrentBucketStartTimeNs = nextBucketStartTimeNs;
+ // Reset mHasHitGuardrail boolean since bucket was reset
+ mHasHitGuardrail = false;
VLOG("metric %lld: new bucket start time: %lld", (long long)mMetricId,
(long long)mCurrentBucketStartTimeNs);
}
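
Several guardrail checks in this change log the "dropping data" error only once per bucket via an mHasHitGuardrail flag that is reset when the bucket rolls over. A compact sketch of that rate-limiting pattern in isolation; the class, limit, and method names here are illustrative rather than the real statsd ones.

#include <cstdio>

// Illustrative guardrail: returns true when data should be dropped, but rate-limits
// the error log to one line per bucket using a has-hit flag.
class GuardrailSketch {
public:
    bool hitGuardRail(size_t tupleCount) {
        if (tupleCount <= kHardLimit) {
            return false;
        }
        if (!mHasHitGuardrail) {
            std::printf("dropping data: dimension hard limit reached\n");
            mHasHitGuardrail = true;  // suppress duplicate logs for the rest of the bucket
        }
        return true;
    }

    void flushCurrentBucket() {
        // Bucket rolled over: allow the error to be logged once again next time.
        mHasHitGuardrail = false;
    }

private:
    static constexpr size_t kHardLimit = 800;  // hypothetical hard limit
    bool mHasHitGuardrail = false;
};

int main() {
    GuardrailSketch g;
    g.hitGuardRail(801);  // logs once
    g.hitGuardRail(802);  // dropped silently
    g.flushCurrentBucket();
    g.hitGuardRail(803);  // logs again in the new bucket
}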
diff --git a/statsd/src/metrics/ValueMetricProducer.h b/statsd/src/metrics/ValueMetricProducer.h
index 956580a..76675b2 100644
--- a/statsd/src/metrics/ValueMetricProducer.h
+++ b/statsd/src/metrics/ValueMetricProducer.h
@@ -24,7 +24,6 @@
#include "HashableDimensionKey.h"
#include "MetricProducer.h"
#include "anomaly/AnomalyTracker.h"
-#include "condition/ConditionTimer.h"
#include "condition/ConditionTracker.h"
#include "external/PullDataReceiver.h"
#include "external/StatsPullerManager.h"
@@ -43,6 +42,7 @@
int64_t mBucketEndNs;
std::vector<int> aggIndex;
std::vector<AggregatedValue> aggregates;
+ std::vector<int> sampleSizes;
/**
* If the metric has no condition, then this field is just wasted.
@@ -117,10 +117,14 @@
virtual ~ValueMetricProducer();
// Process data pulled on bucket boundary.
- virtual void onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& data, bool pullSuccess,
- int64_t originalPullTimeNs) override {
+ virtual void onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& data,
+ PullResult pullResult, int64_t originalPullTimeNs) override {
}
+ // Determine if metric needs to pull
+ virtual bool isPullNeeded() const override {
+ return false;
+ }
// ValueMetric needs special logic if it's a pulled atom.
void onStatsdInitCompleted(const int64_t& eventTimeNs) override;
@@ -167,9 +171,10 @@
void clearPastBucketsLocked(const int64_t dumpTimeNs) override;
// ValueMetricProducer internal interface to handle active state change.
- void onActiveStateChangedLocked(const int64_t eventTimeNs) override;
+ void onActiveStateChangedLocked(const int64_t eventTimeNs, const bool isActive) override;
- virtual void onActiveStateChangedInternalLocked(const int64_t eventTimeNs) {
+ virtual void onActiveStateChangedInternalLocked(const int64_t eventTimeNs,
+ const bool isActive) {
}
// ValueMetricProducer internal interface to handle condition change.
@@ -210,7 +215,7 @@
// causes the bucket to be invalidated will not notify StatsdStats.
void skipCurrentBucket(const int64_t dropTimeNs, const BucketDropReason reason);
- bool onConfigUpdatedLocked(
+ optional<InvalidConfigReason> onConfigUpdatedLocked(
const StatsdConfig& config, const int configIndex, const int metricIndex,
const std::vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
const std::unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
@@ -315,7 +320,7 @@
const int64_t mMinBucketSizeNs;
// Util function to check whether the specified dimension hits the guardrail.
- bool hitGuardRailLocked(const MetricDimensionKey& newKey) const;
+ bool hitGuardRailLocked(const MetricDimensionKey& newKey);
bool hasReachedGuardRailLimit() const;
@@ -337,6 +342,7 @@
virtual void writePastBucketAggregateToProto(const int aggIndex,
const AggregatedValue& aggregate,
+ const int sampleSize,
ProtoOutputStream* const protoOutput) const = 0;
static const size_t kBucketSize = sizeof(PastBucket<AggregatedValue>{});
@@ -349,8 +355,6 @@
// BucketDropReason, many of which make the bucket potentially invalid.
bool mCurrentBucketIsSkipped;
- ConditionTimer mConditionTimer;
-
/** Stores condition correction threshold from the ValueMetric configuration */
optional<int64_t> mConditionCorrectionThresholdNs;
diff --git a/statsd/src/metrics/duration_helper/DurationTracker.h b/statsd/src/metrics/duration_helper/DurationTracker.h
index 849effa..36a0c51 100644
--- a/statsd/src/metrics/duration_helper/DurationTracker.h
+++ b/statsd/src/metrics/duration_helper/DurationTracker.h
@@ -55,6 +55,7 @@
int64_t mBucketStartNs;
int64_t mBucketEndNs;
int64_t mDuration;
+ int64_t mConditionTrueNs;
};
struct DurationValues {
@@ -86,7 +87,8 @@
mStartTimeNs(startTimeNs),
mConditionSliced(conditionSliced),
mHasLinksToAllConditionDimensionsInTracker(fullLink),
- mAnomalyTrackers(anomalyTrackers){};
+ mAnomalyTrackers(anomalyTrackers),
+ mHasHitGuardrail(false){};
virtual ~DurationTracker(){};
@@ -103,7 +105,7 @@
const bool stopAll) = 0;
virtual void noteStopAll(const int64_t eventTime) = 0;
- virtual void onSlicedConditionMayChange(bool overallCondition, const int64_t timestamp) = 0;
+ virtual void onSlicedConditionMayChange(const int64_t timestamp) = 0;
virtual void onConditionChanged(bool condition, const int64_t timestamp) = 0;
virtual void onStateChanged(const int64_t timestamp, const int32_t atomId,
@@ -119,6 +121,7 @@
// an app upgrade, we assume that we're trying to form a partial bucket.
virtual bool flushCurrentBucket(
const int64_t& eventTimeNs, const optional<UploadThreshold>& uploadThreshold,
+ const int64_t globalConditionTrueNs,
std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) = 0;
// Predict the anomaly timestamp given the current status.
@@ -266,6 +269,8 @@
std::vector<sp<AnomalyTracker>> mAnomalyTrackers;
+ bool mHasHitGuardrail;
+
FRIEND_TEST(OringDurationTrackerTest, TestPredictAnomalyTimestamp);
FRIEND_TEST(OringDurationTrackerTest, TestAnomalyDetectionExpiredAlarm);
FRIEND_TEST(OringDurationTrackerTest, TestAnomalyDetectionFiredAlarm);
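
DurationBucket gains an mConditionTrueNs field, and flushCurrentBucket() now stamps it with the globalConditionTrueNs supplied by the caller (0 on the plain flushIfNeeded path). A bare-bones sketch of that plumbing with placeholder types; the real tracker state and event keys are omitted.

#include <cstdint>
#include <cstdio>
#include <string>
#include <unordered_map>
#include <vector>

struct DurationBucketSketch {
    int64_t mBucketStartNs = 0;
    int64_t mBucketEndNs = 0;
    int64_t mDuration = 0;
    int64_t mConditionTrueNs = 0;  // new field: condition-true time for the bucket
};

// Stand-in for DurationTracker::flushCurrentBucket(): the caller supplies how long
// the overall condition was true in this bucket, and it is copied onto the output.
void flushCurrentBucketSketch(
        int64_t bucketStartNs, int64_t bucketEndNs, int64_t duration,
        int64_t globalConditionTrueNs,
        std::unordered_map<std::string, std::vector<DurationBucketSketch>>* output) {
    DurationBucketSketch info;
    info.mBucketStartNs = bucketStartNs;
    info.mBucketEndNs = bucketEndNs;
    info.mDuration = duration;
    info.mConditionTrueNs = globalConditionTrueNs;
    (*output)["eventKey"].push_back(info);
}

int main() {
    std::unordered_map<std::string, std::vector<DurationBucketSketch>> output;
    flushCurrentBucketSketch(0, 60, 45, 50, &output);
    std::printf("conditionTrueNs=%lld\n", (long long)output["eventKey"][0].mConditionTrueNs);
}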
diff --git a/statsd/src/metrics/duration_helper/MaxDurationTracker.cpp b/statsd/src/metrics/duration_helper/MaxDurationTracker.cpp
index 0e93c28..abbdc53 100644
--- a/statsd/src/metrics/duration_helper/MaxDurationTracker.cpp
+++ b/statsd/src/metrics/duration_helper/MaxDurationTracker.cpp
@@ -48,8 +48,11 @@
StatsdStats::getInstance().noteMetricDimensionSize(mConfigKey, mTrackerId, newTupleCount);
// 2. Don't add more tuples, we are above the allowed threshold. Drop the data.
if (newTupleCount > StatsdStats::kDimensionKeySizeHardLimit) {
- ALOGE("MaxDurTracker %lld dropping data for dimension key %s",
- (long long)mTrackerId, newKey.toString().c_str());
+ if (!mHasHitGuardrail) {
+ ALOGE("MaxDurTracker %lld dropping data for dimension key %s",
+ (long long)mTrackerId, newKey.toString().c_str());
+ mHasHitGuardrail = true;
+ }
return true;
}
}
@@ -163,6 +166,7 @@
bool MaxDurationTracker::flushCurrentBucket(
const int64_t& eventTimeNs, const optional<UploadThreshold>& uploadThreshold,
+ const int64_t globalConditionTrueNs,
std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) {
VLOG("MaxDurationTracker flushing.....");
@@ -197,6 +201,7 @@
info.mBucketStartNs = mCurrentBucketStartTimeNs;
info.mBucketEndNs = currentBucketEndTimeNs;
info.mDuration = mDuration;
+ info.mConditionTrueNs = globalConditionTrueNs;
(*output)[mEventKey].push_back(info);
VLOG(" final duration for last bucket: %lld", (long long)mDuration);
} else {
@@ -211,6 +216,8 @@
}
mDuration = 0;
+ // Reset mHasHitGuardrail boolean since bucket was reset
+ mHasHitGuardrail = false;
// If this tracker has no pending events, tell owner to remove.
return !hasPendingEvent;
}
@@ -221,11 +228,10 @@
if (eventTimeNs < getCurrentBucketEndTimeNs()) {
return false;
}
- return flushCurrentBucket(eventTimeNs, uploadThreshold, output);
+ return flushCurrentBucket(eventTimeNs, uploadThreshold, /*globalConditionTrueNs*/ 0, output);
}
-void MaxDurationTracker::onSlicedConditionMayChange(bool overallCondition,
- const int64_t timestamp) {
+void MaxDurationTracker::onSlicedConditionMayChange(const int64_t timestamp) {
// Now for each of the on-going event, check if the condition has changed for them.
for (auto& pair : mInfos) {
if (pair.second.state == kStopped) {
diff --git a/statsd/src/metrics/duration_helper/MaxDurationTracker.h b/statsd/src/metrics/duration_helper/MaxDurationTracker.h
index b1fdd6e..dd2acc3 100644
--- a/statsd/src/metrics/duration_helper/MaxDurationTracker.h
+++ b/statsd/src/metrics/duration_helper/MaxDurationTracker.h
@@ -47,9 +47,10 @@
std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) override;
bool flushCurrentBucket(
const int64_t& eventTimeNs, const optional<UploadThreshold>& uploadThreshold,
+ const int64_t globalConditionTrueNs,
std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>*) override;
- void onSlicedConditionMayChange(bool overallCondition, const int64_t timestamp) override;
+ void onSlicedConditionMayChange(const int64_t timestamp) override;
void onConditionChanged(bool condition, const int64_t timestamp) override;
void onStateChanged(const int64_t timestamp, const int32_t atomId,
diff --git a/statsd/src/metrics/duration_helper/OringDurationTracker.cpp b/statsd/src/metrics/duration_helper/OringDurationTracker.cpp
index b9dafda..c3cfb1d 100644
--- a/statsd/src/metrics/duration_helper/OringDurationTracker.cpp
+++ b/statsd/src/metrics/duration_helper/OringDurationTracker.cpp
@@ -48,8 +48,11 @@
StatsdStats::getInstance().noteMetricDimensionSize(mConfigKey, mTrackerId, newTupleCount);
// 2. Don't add more tuples, we are above the allowed threshold. Drop the data.
if (newTupleCount > StatsdStats::kDimensionKeySizeHardLimit) {
- ALOGE("OringDurTracker %lld dropping data for dimension key %s",
- (long long)mTrackerId, newKey.toString().c_str());
+ if (!mHasHitGuardrail) {
+ ALOGE("OringDurTracker %lld dropping data for dimension key %s",
+ (long long)mTrackerId, newKey.toString().c_str());
+ mHasHitGuardrail = true;
+ }
return true;
}
}
@@ -133,6 +136,7 @@
bool OringDurationTracker::flushCurrentBucket(
const int64_t& eventTimeNs, const optional<UploadThreshold>& uploadThreshold,
+ const int64_t globalConditionTrueNs,
std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) {
VLOG("OringDurationTracker Flushing.............");
@@ -169,6 +173,7 @@
current_info.mBucketStartNs = mCurrentBucketStartTimeNs;
current_info.mBucketEndNs = currentBucketEndTimeNs;
current_info.mDuration = durationIt.second.mDuration;
+ current_info.mConditionTrueNs = globalConditionTrueNs;
(*output)[MetricDimensionKey(mEventKey.getDimensionKeyInWhat(), durationIt.first)]
.push_back(current_info);
@@ -214,6 +219,8 @@
mCurrentBucketStartTimeNs = eventTimeNs;
}
mLastStartTime = mCurrentBucketStartTimeNs;
+ // Reset mHasHitGuardrail boolean since bucket was reset
+ mHasHitGuardrail = false;
// If all stopped, then tell owner it's safe to remove this tracker on a full bucket.
// On a partial bucket, only clear if no anomaly trackers, as full bucket duration is used
@@ -229,11 +236,10 @@
if (eventTimeNs < getCurrentBucketEndTimeNs()) {
return false;
}
- return flushCurrentBucket(eventTimeNs, uploadThreshold, output);
+ return flushCurrentBucket(eventTimeNs, uploadThreshold, /*globalConditionTrueNs=*/0, output);
}
-void OringDurationTracker::onSlicedConditionMayChange(bool overallCondition,
- const int64_t timestamp) {
+void OringDurationTracker::onSlicedConditionMayChange(const int64_t timestamp) {
vector<pair<HashableDimensionKey, int>> startedToPaused;
vector<pair<HashableDimensionKey, int>> pausedToStarted;
if (!mStarted.empty()) {
diff --git a/statsd/src/metrics/duration_helper/OringDurationTracker.h b/statsd/src/metrics/duration_helper/OringDurationTracker.h
index 8f8d535..45cec8b 100644
--- a/statsd/src/metrics/duration_helper/OringDurationTracker.h
+++ b/statsd/src/metrics/duration_helper/OringDurationTracker.h
@@ -41,7 +41,7 @@
const bool stopAll) override;
void noteStopAll(const int64_t eventTime) override;
- void onSlicedConditionMayChange(bool overallCondition, const int64_t timestamp) override;
+ void onSlicedConditionMayChange(const int64_t timestamp) override;
void onConditionChanged(bool condition, const int64_t timestamp) override;
void onStateChanged(const int64_t timestamp, const int32_t atomId,
@@ -49,6 +49,7 @@
bool flushCurrentBucket(
const int64_t& eventTimeNs, const optional<UploadThreshold>& uploadThreshold,
+ const int64_t globalConditionTrueNs,
std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) override;
bool flushIfNeeded(
int64_t timestampNs, const optional<UploadThreshold>& uploadThreshold,
diff --git a/statsd/src/metrics/parsing_utils/config_update_utils.cpp b/statsd/src/metrics/parsing_utils/config_update_utils.cpp
index c0a1efa..e7f30bc 100644
--- a/statsd/src/metrics/parsing_utils/config_update_utils.cpp
+++ b/statsd/src/metrics/parsing_utils/config_update_utils.cpp
@@ -31,16 +31,16 @@
namespace statsd {
// Recursive function to determine if a matcher needs to be updated. Populates matchersToUpdate.
-// Returns whether the function was successful or not.
-bool determineMatcherUpdateStatus(const StatsdConfig& config, const int matcherIdx,
- const unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
- const vector<sp<AtomMatchingTracker>>& oldAtomMatchingTrackers,
- const unordered_map<int64_t, int>& newAtomMatchingTrackerMap,
- vector<UpdateStatus>& matchersToUpdate,
- vector<bool>& cycleTracker) {
+// Returns nullopt if successful and InvalidConfigReason if not.
+optional<InvalidConfigReason> determineMatcherUpdateStatus(
+ const StatsdConfig& config, const int matcherIdx,
+ const unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
+ const vector<sp<AtomMatchingTracker>>& oldAtomMatchingTrackers,
+ const unordered_map<int64_t, int>& newAtomMatchingTrackerMap,
+ vector<UpdateStatus>& matchersToUpdate, vector<bool>& cycleTracker) {
// Have already examined this matcher.
if (matchersToUpdate[matcherIdx] != UPDATE_UNKNOWN) {
- return true;
+ return nullopt;
}
const AtomMatcher& matcher = config.atom_matcher(matcherIdx);
@@ -49,25 +49,27 @@
const auto& oldAtomMatchingTrackerIt = oldAtomMatchingTrackerMap.find(id);
if (oldAtomMatchingTrackerIt == oldAtomMatchingTrackerMap.end()) {
matchersToUpdate[matcherIdx] = UPDATE_NEW;
- return true;
+ return nullopt;
}
// This is an existing matcher. Check if it has changed.
string serializedMatcher;
if (!matcher.SerializeToString(&serializedMatcher)) {
ALOGE("Unable to serialize matcher %lld", (long long)id);
- return false;
+ return createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_MATCHER_SERIALIZATION_FAILED, id);
}
uint64_t newProtoHash = Hash64(serializedMatcher);
if (newProtoHash != oldAtomMatchingTrackers[oldAtomMatchingTrackerIt->second]->getProtoHash()) {
matchersToUpdate[matcherIdx] = UPDATE_REPLACE;
- return true;
+ return nullopt;
}
+ optional<InvalidConfigReason> invalidConfigReason;
switch (matcher.contents_case()) {
case AtomMatcher::ContentsCase::kSimpleAtomMatcher: {
matchersToUpdate[matcherIdx] = UPDATE_PRESERVE;
- return true;
+ return nullopt;
}
case AtomMatcher::ContentsCase::kCombination: {
// Recurse to check if children have changed.
@@ -77,17 +79,25 @@
const auto& childIt = newAtomMatchingTrackerMap.find(childMatcherId);
if (childIt == newAtomMatchingTrackerMap.end()) {
ALOGW("Matcher %lld not found in the config", (long long)childMatcherId);
- return false;
+ invalidConfigReason = createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_MATCHER_CHILD_NOT_FOUND, id);
+ invalidConfigReason->matcherIds.push_back(childMatcherId);
+ return invalidConfigReason;
}
const int childIdx = childIt->second;
if (cycleTracker[childIdx]) {
ALOGE("Cycle detected in matcher config");
- return false;
+ invalidConfigReason = createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_MATCHER_CYCLE, id);
+ invalidConfigReason->matcherIds.push_back(childMatcherId);
+ return invalidConfigReason;
}
- if (!determineMatcherUpdateStatus(
- config, childIdx, oldAtomMatchingTrackerMap, oldAtomMatchingTrackers,
- newAtomMatchingTrackerMap, matchersToUpdate, cycleTracker)) {
- return false;
+ invalidConfigReason = determineMatcherUpdateStatus(
+ config, childIdx, oldAtomMatchingTrackerMap, oldAtomMatchingTrackers,
+ newAtomMatchingTrackerMap, matchersToUpdate, cycleTracker);
+ if (invalidConfigReason.has_value()) {
+ invalidConfigReason->matcherIds.push_back(id);
+ return invalidConfigReason;
}
if (matchersToUpdate[childIdx] == UPDATE_REPLACE) {
@@ -97,34 +107,37 @@
}
matchersToUpdate[matcherIdx] = status;
cycleTracker[matcherIdx] = false;
- return true;
+ return nullopt;
}
default: {
ALOGE("Matcher \"%lld\" malformed", (long long)id);
- return false;
+ return createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_MATCHER_MALFORMED_CONTENTS_CASE, id);
}
}
- return true;
+ return nullopt;
}
-bool updateAtomMatchingTrackers(const StatsdConfig& config, const sp<UidMap>& uidMap,
- const unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
- const vector<sp<AtomMatchingTracker>>& oldAtomMatchingTrackers,
- set<int>& allTagIds,
- unordered_map<int64_t, int>& newAtomMatchingTrackerMap,
- vector<sp<AtomMatchingTracker>>& newAtomMatchingTrackers,
- set<int64_t>& replacedMatchers) {
+optional<InvalidConfigReason> updateAtomMatchingTrackers(
+ const StatsdConfig& config, const sp<UidMap>& uidMap,
+ const unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
+ const vector<sp<AtomMatchingTracker>>& oldAtomMatchingTrackers,
+ std::unordered_map<int, std::vector<int>>& allTagIdsToMatchersMap,
+ unordered_map<int64_t, int>& newAtomMatchingTrackerMap,
+ vector<sp<AtomMatchingTracker>>& newAtomMatchingTrackers, set<int64_t>& replacedMatchers) {
const int atomMatcherCount = config.atom_matcher_size();
vector<AtomMatcher> matcherProtos;
matcherProtos.reserve(atomMatcherCount);
newAtomMatchingTrackers.reserve(atomMatcherCount);
+ optional<InvalidConfigReason> invalidConfigReason;
// Maps matcher id to their position in the config. For fast lookup of dependencies.
for (int i = 0; i < atomMatcherCount; i++) {
const AtomMatcher& matcher = config.atom_matcher(i);
if (newAtomMatchingTrackerMap.find(matcher.id()) != newAtomMatchingTrackerMap.end()) {
ALOGE("Duplicate atom matcher found for id %lld", (long long)matcher.id());
- return false;
+ return createInvalidConfigReasonWithMatcher(INVALID_CONFIG_REASON_MATCHER_DUPLICATE,
+ matcher.id());
}
newAtomMatchingTrackerMap[matcher.id()] = i;
matcherProtos.push_back(matcher);
@@ -134,10 +147,11 @@
vector<UpdateStatus> matchersToUpdate(atomMatcherCount, UPDATE_UNKNOWN);
vector<bool> cycleTracker(atomMatcherCount, false);
for (int i = 0; i < atomMatcherCount; i++) {
- if (!determineMatcherUpdateStatus(config, i, oldAtomMatchingTrackerMap,
- oldAtomMatchingTrackers, newAtomMatchingTrackerMap,
- matchersToUpdate, cycleTracker)) {
- return false;
+ invalidConfigReason = determineMatcherUpdateStatus(
+ config, i, oldAtomMatchingTrackerMap, oldAtomMatchingTrackers,
+ newAtomMatchingTrackerMap, matchersToUpdate, cycleTracker);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
}
@@ -151,13 +165,16 @@
ALOGE("Could not find AtomMatcher %lld in the previous config, but expected it "
"to be there",
(long long)id);
- return false;
+ return createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_MATCHER_NOT_IN_PREV_CONFIG, id);
}
const sp<AtomMatchingTracker>& tracker =
oldAtomMatchingTrackers[oldAtomMatchingTrackerIt->second];
- if (!tracker->onConfigUpdated(matcherProtos[i], i, newAtomMatchingTrackerMap)) {
+ invalidConfigReason =
+ tracker->onConfigUpdated(matcherProtos[i], i, newAtomMatchingTrackerMap);
+ if (invalidConfigReason.has_value()) {
ALOGW("Config update failed for matcher %lld", (long long)id);
- return false;
+ return invalidConfigReason;
}
newAtomMatchingTrackers.push_back(tracker);
break;
@@ -166,9 +183,10 @@
replacedMatchers.insert(id);
[[fallthrough]]; // Intentionally fallthrough to create the new matcher.
case UPDATE_NEW: {
- sp<AtomMatchingTracker> tracker = createAtomMatchingTracker(matcher, i, uidMap);
+ sp<AtomMatchingTracker> tracker =
+ createAtomMatchingTracker(matcher, i, uidMap, invalidConfigReason);
if (tracker == nullptr) {
- return false;
+ return invalidConfigReason;
}
newAtomMatchingTrackers.push_back(tracker);
break;
@@ -176,37 +194,54 @@
default: {
ALOGE("Matcher \"%lld\" update state is unknown. This should never happen",
(long long)id);
- return false;
+ return createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_MATCHER_UPDATE_STATUS_UNKNOWN, id);
}
}
}
std::fill(cycleTracker.begin(), cycleTracker.end(), false);
- for (auto& matcher : newAtomMatchingTrackers) {
- if (!matcher->init(matcherProtos, newAtomMatchingTrackers, newAtomMatchingTrackerMap,
- cycleTracker)) {
- return false;
+ for (size_t matcherIndex = 0; matcherIndex < newAtomMatchingTrackers.size(); matcherIndex++) {
+ auto& matcher = newAtomMatchingTrackers[matcherIndex];
+ invalidConfigReason = matcher->init(matcherProtos, newAtomMatchingTrackers,
+ newAtomMatchingTrackerMap, cycleTracker);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
+
// Collect all the tag ids that are interesting. TagIds exist in leaf nodes only.
const set<int>& tagIds = matcher->getAtomIds();
- allTagIds.insert(tagIds.begin(), tagIds.end());
+ for (int atomId : tagIds) {
+ auto& matchers = allTagIdsToMatchersMap[atomId];
+ // Performance note:
+ // For small amount of elements linear search in vector will be
+            // For a small number of elements, a linear search in a vector is
+            // faster than a lookup in a set:
+            // - we do not expect the matchers vector per atom id to be large (< 10 entries)
+            // - iterating a vector is the fastest option compared to other containers
+            //   (set, etc.) in the hot path MetricsManager::onLogEvent()
+            // - vector<T> has the smallest memory footprint of the std container
+            //   implementations
+ matchers.push_back(matcherIndex);
+ }
+ }
}
- return true;
+ return nullopt;
}
// Recursive function to determine if a condition needs to be updated. Populates conditionsToUpdate.
-// Returns whether the function was successful or not.
-bool determineConditionUpdateStatus(const StatsdConfig& config, const int conditionIdx,
- const unordered_map<int64_t, int>& oldConditionTrackerMap,
- const vector<sp<ConditionTracker>>& oldConditionTrackers,
- const unordered_map<int64_t, int>& newConditionTrackerMap,
- const set<int64_t>& replacedMatchers,
- vector<UpdateStatus>& conditionsToUpdate,
- vector<bool>& cycleTracker) {
+// Returns nullopt if successful and InvalidConfigReason if not.
+optional<InvalidConfigReason> determineConditionUpdateStatus(
+ const StatsdConfig& config, const int conditionIdx,
+ const unordered_map<int64_t, int>& oldConditionTrackerMap,
+ const vector<sp<ConditionTracker>>& oldConditionTrackers,
+ const unordered_map<int64_t, int>& newConditionTrackerMap,
+ const set<int64_t>& replacedMatchers, vector<UpdateStatus>& conditionsToUpdate,
+ vector<bool>& cycleTracker) {
// Have already examined this condition.
if (conditionsToUpdate[conditionIdx] != UPDATE_UNKNOWN) {
- return true;
+ return nullopt;
}
const Predicate& predicate = config.predicate(conditionIdx);
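
The config-update helpers now return std::optional<InvalidConfigReason> instead of bool: nullopt means success, and a populated reason is propagated back up the recursion with each level appending its own id, so the final reason records the path to the offending matcher or predicate. A stripped-down sketch of that propagation pattern follows; InvalidConfigReasonSketch is a placeholder holding only what the example needs, not the real struct.

#include <cstdint>
#include <cstdio>
#include <optional>
#include <vector>

// Placeholder for the real InvalidConfigReason: a reason code plus the chain of
// matcher ids visited on the way to the failure.
struct InvalidConfigReasonSketch {
    int reason = 0;
    std::vector<int64_t> matcherIds;
};

// Recursive check in the style of determineMatcherUpdateStatus(): nullopt == OK,
// otherwise return the reason and let each caller append its own id.
std::optional<InvalidConfigReasonSketch> checkMatcher(int64_t id, int depth) {
    if (depth == 0) {
        InvalidConfigReasonSketch reason;
        reason.reason = 1;  // e.g. "child not found" in the real code
        reason.matcherIds.push_back(id);
        return reason;
    }
    auto invalidConfigReason = checkMatcher(id * 10, depth - 1);
    if (invalidConfigReason.has_value()) {
        invalidConfigReason->matcherIds.push_back(id);  // record the path back up
        return invalidConfigReason;
    }
    return std::nullopt;
}

int main() {
    const auto reason = checkMatcher(7, 2);
    if (reason.has_value()) {
        std::printf("invalid config, matcher chain:");
        for (int64_t id : reason->matcherIds) std::printf(" %lld", (long long)id);
        std::printf("\n");  // prints: 700 70 7 (leaf first, root last)
    }
}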
@@ -215,21 +250,23 @@
const auto& oldConditionTrackerIt = oldConditionTrackerMap.find(id);
if (oldConditionTrackerIt == oldConditionTrackerMap.end()) {
conditionsToUpdate[conditionIdx] = UPDATE_NEW;
- return true;
+ return nullopt;
}
// This is an existing condition. Check if it has changed.
string serializedCondition;
if (!predicate.SerializeToString(&serializedCondition)) {
- ALOGE("Unable to serialize matcher %lld", (long long)id);
- return false;
+ ALOGE("Unable to serialize predicate %lld", (long long)id);
+ return createInvalidConfigReasonWithPredicate(
+ INVALID_CONFIG_REASON_CONDITION_SERIALIZATION_FAILED, id);
}
uint64_t newProtoHash = Hash64(serializedCondition);
if (newProtoHash != oldConditionTrackers[oldConditionTrackerIt->second]->getProtoHash()) {
conditionsToUpdate[conditionIdx] = UPDATE_REPLACE;
- return true;
+ return nullopt;
}
+ optional<InvalidConfigReason> invalidConfigReason;
switch (predicate.contents_case()) {
case Predicate::ContentsCase::kSimplePredicate: {
// Need to check if any of the underlying matchers changed.
@@ -237,23 +274,23 @@
if (simplePredicate.has_start()) {
if (replacedMatchers.find(simplePredicate.start()) != replacedMatchers.end()) {
conditionsToUpdate[conditionIdx] = UPDATE_REPLACE;
- return true;
+ return nullopt;
}
}
if (simplePredicate.has_stop()) {
if (replacedMatchers.find(simplePredicate.stop()) != replacedMatchers.end()) {
conditionsToUpdate[conditionIdx] = UPDATE_REPLACE;
- return true;
+ return nullopt;
}
}
if (simplePredicate.has_stop_all()) {
if (replacedMatchers.find(simplePredicate.stop_all()) != replacedMatchers.end()) {
conditionsToUpdate[conditionIdx] = UPDATE_REPLACE;
- return true;
+ return nullopt;
}
}
conditionsToUpdate[conditionIdx] = UPDATE_PRESERVE;
- return true;
+ return nullopt;
}
case Predicate::ContentsCase::kCombination: {
// Need to recurse on the children to see if any of the child predicates changed.
@@ -263,18 +300,25 @@
const auto& childIt = newConditionTrackerMap.find(childPredicateId);
if (childIt == newConditionTrackerMap.end()) {
ALOGW("Predicate %lld not found in the config", (long long)childPredicateId);
- return false;
+ invalidConfigReason = createInvalidConfigReasonWithPredicate(
+ INVALID_CONFIG_REASON_CONDITION_CHILD_NOT_FOUND, id);
+ invalidConfigReason->conditionIds.push_back(childPredicateId);
+ return invalidConfigReason;
}
const int childIdx = childIt->second;
if (cycleTracker[childIdx]) {
ALOGE("Cycle detected in predicate config");
- return false;
+ invalidConfigReason = createInvalidConfigReasonWithPredicate(
+ INVALID_CONFIG_REASON_CONDITION_CYCLE, id);
+ invalidConfigReason->conditionIds.push_back(childPredicateId);
+ return invalidConfigReason;
}
- if (!determineConditionUpdateStatus(config, childIdx, oldConditionTrackerMap,
- oldConditionTrackers, newConditionTrackerMap,
- replacedMatchers, conditionsToUpdate,
- cycleTracker)) {
- return false;
+ invalidConfigReason = determineConditionUpdateStatus(
+ config, childIdx, oldConditionTrackerMap, oldConditionTrackers,
+ newConditionTrackerMap, replacedMatchers, conditionsToUpdate, cycleTracker);
+ if (invalidConfigReason.has_value()) {
+ invalidConfigReason->conditionIds.push_back(id);
+ return invalidConfigReason;
}
if (conditionsToUpdate[childIdx] == UPDATE_REPLACE) {
@@ -284,37 +328,41 @@
}
conditionsToUpdate[conditionIdx] = status;
cycleTracker[conditionIdx] = false;
- return true;
+ return nullopt;
}
default: {
ALOGE("Predicate \"%lld\" malformed", (long long)id);
- return false;
+ return createInvalidConfigReasonWithPredicate(
+ INVALID_CONFIG_REASON_CONDITION_MALFORMED_CONTENTS_CASE, id);
}
}
- return true;
+ return nullopt;
}
-bool updateConditions(const ConfigKey& key, const StatsdConfig& config,
- const unordered_map<int64_t, int>& atomMatchingTrackerMap,
- const set<int64_t>& replacedMatchers,
- const unordered_map<int64_t, int>& oldConditionTrackerMap,
- const vector<sp<ConditionTracker>>& oldConditionTrackers,
- unordered_map<int64_t, int>& newConditionTrackerMap,
- vector<sp<ConditionTracker>>& newConditionTrackers,
- unordered_map<int, vector<int>>& trackerToConditionMap,
- vector<ConditionState>& conditionCache, set<int64_t>& replacedConditions) {
+optional<InvalidConfigReason> updateConditions(
+ const ConfigKey& key, const StatsdConfig& config,
+ const unordered_map<int64_t, int>& atomMatchingTrackerMap,
+ const set<int64_t>& replacedMatchers,
+ const unordered_map<int64_t, int>& oldConditionTrackerMap,
+ const vector<sp<ConditionTracker>>& oldConditionTrackers,
+ unordered_map<int64_t, int>& newConditionTrackerMap,
+ vector<sp<ConditionTracker>>& newConditionTrackers,
+ unordered_map<int, vector<int>>& trackerToConditionMap,
+ vector<ConditionState>& conditionCache, set<int64_t>& replacedConditions) {
vector<Predicate> conditionProtos;
const int conditionTrackerCount = config.predicate_size();
conditionProtos.reserve(conditionTrackerCount);
newConditionTrackers.reserve(conditionTrackerCount);
conditionCache.assign(conditionTrackerCount, ConditionState::kNotEvaluated);
+ optional<InvalidConfigReason> invalidConfigReason;
for (int i = 0; i < conditionTrackerCount; i++) {
const Predicate& condition = config.predicate(i);
if (newConditionTrackerMap.find(condition.id()) != newConditionTrackerMap.end()) {
ALOGE("Duplicate Predicate found!");
- return false;
+ return createInvalidConfigReasonWithPredicate(INVALID_CONFIG_REASON_CONDITION_DUPLICATE,
+ condition.id());
}
newConditionTrackerMap[condition.id()] = i;
conditionProtos.push_back(condition);
@@ -323,10 +371,11 @@
vector<UpdateStatus> conditionsToUpdate(conditionTrackerCount, UPDATE_UNKNOWN);
vector<bool> cycleTracker(conditionTrackerCount, false);
for (int i = 0; i < conditionTrackerCount; i++) {
- if (!determineConditionUpdateStatus(config, i, oldConditionTrackerMap, oldConditionTrackers,
- newConditionTrackerMap, replacedMatchers,
- conditionsToUpdate, cycleTracker)) {
- return false;
+ invalidConfigReason = determineConditionUpdateStatus(
+ config, i, oldConditionTrackerMap, oldConditionTrackers, newConditionTrackerMap,
+ replacedMatchers, conditionsToUpdate, cycleTracker);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
}
@@ -343,7 +392,8 @@
ALOGE("Could not find Predicate %lld in the previous config, but expected it "
"to be there",
(long long)id);
- return false;
+ return createInvalidConfigReasonWithPredicate(
+ INVALID_CONFIG_REASON_CONDITION_NOT_IN_PREV_CONFIG, id);
}
const int oldIndex = oldConditionTrackerIt->second;
newConditionTrackers.push_back(oldConditionTrackers[oldIndex]);
@@ -353,10 +403,10 @@
replacedConditions.insert(id);
[[fallthrough]]; // Intentionally fallthrough to create the new condition tracker.
case UPDATE_NEW: {
- sp<ConditionTracker> tracker =
- createConditionTracker(key, predicate, i, atomMatchingTrackerMap);
+ sp<ConditionTracker> tracker = createConditionTracker(
+ key, predicate, i, atomMatchingTrackerMap, invalidConfigReason);
if (tracker == nullptr) {
- return false;
+ return invalidConfigReason;
}
newConditionTrackers.push_back(tracker);
break;
@@ -364,19 +414,21 @@
default: {
ALOGE("Condition \"%lld\" update state is unknown. This should never happen",
(long long)id);
- return false;
+ return createInvalidConfigReasonWithPredicate(
+ INVALID_CONFIG_REASON_CONDITION_UPDATE_STATUS_UNKNOWN, id);
}
}
}
// Update indices of preserved predicates.
for (const int conditionIndex : preservedConditions) {
- if (!newConditionTrackers[conditionIndex]->onConfigUpdated(
- conditionProtos, conditionIndex, newConditionTrackers, atomMatchingTrackerMap,
- newConditionTrackerMap)) {
+ invalidConfigReason = newConditionTrackers[conditionIndex]->onConfigUpdated(
+ conditionProtos, conditionIndex, newConditionTrackers, atomMatchingTrackerMap,
+ newConditionTrackerMap);
+ if (invalidConfigReason.has_value()) {
ALOGE("Failed to update condition %lld",
(long long)newConditionTrackers[conditionIndex]->getConditionId());
- return false;
+ return invalidConfigReason;
}
}
@@ -384,25 +436,30 @@
for (int conditionIndex = 0; conditionIndex < conditionTrackerCount; conditionIndex++) {
const sp<ConditionTracker>& conditionTracker = newConditionTrackers[conditionIndex];
// Calling init on preserved conditions is OK. It is needed to fill the condition cache.
- if (!conditionTracker->init(conditionProtos, newConditionTrackers, newConditionTrackerMap,
- cycleTracker, conditionCache)) {
- return false;
+ invalidConfigReason =
+ conditionTracker->init(conditionProtos, newConditionTrackers,
+ newConditionTrackerMap, cycleTracker, conditionCache);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
for (const int trackerIndex : conditionTracker->getAtomMatchingTrackerIndex()) {
vector<int>& conditionList = trackerToConditionMap[trackerIndex];
conditionList.push_back(conditionIndex);
}
}
- return true;
+ return nullopt;
}
-bool updateStates(const StatsdConfig& config, const map<int64_t, uint64_t>& oldStateProtoHashes,
- unordered_map<int64_t, int>& stateAtomIdMap,
- unordered_map<int64_t, unordered_map<int, int64_t>>& allStateGroupMaps,
- map<int64_t, uint64_t>& newStateProtoHashes, set<int64_t>& replacedStates) {
+optional<InvalidConfigReason> updateStates(
+ const StatsdConfig& config, const map<int64_t, uint64_t>& oldStateProtoHashes,
+ unordered_map<int64_t, int>& stateAtomIdMap,
+ unordered_map<int64_t, unordered_map<int, int64_t>>& allStateGroupMaps,
+ map<int64_t, uint64_t>& newStateProtoHashes, set<int64_t>& replacedStates) {
// Share with metrics_manager_util.
- if (!initStates(config, stateAtomIdMap, allStateGroupMaps, newStateProtoHashes)) {
- return false;
+ optional<InvalidConfigReason> invalidConfigReason =
+ initStates(config, stateAtomIdMap, allStateGroupMaps, newStateProtoHashes);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
for (const auto& [stateId, stateHash] : oldStateProtoHashes) {
@@ -411,7 +468,7 @@
replacedStates.insert(stateId);
}
}
- return true;
+ return nullopt;
}
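// The replaced-state detection above compares per-state proto hashes from the old config
// against the freshly computed ones; the comparison line itself is elided by this hunk,
// so the following is a hedged reconstruction of the visible branch only (how a state id
// missing from the new config is handled is not shown here):
#include <cstdint>
#include <map>
#include <set>

void collectReplacedStates(const std::map<int64_t, uint64_t>& oldStateProtoHashes,
                           const std::map<int64_t, uint64_t>& newStateProtoHashes,
                           std::set<int64_t>& replacedStates) {
    for (const auto& [stateId, oldHash] : oldStateProtoHashes) {
        const auto it = newStateProtoHashes.find(stateId);
        if (it != newStateProtoHashes.end() && it->second != oldHash) {
            replacedStates.insert(stateId);  // state proto changed; dependents must be replaced
        }
    }
}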
// Returns true if any matchers in the metric activation were replaced.
bool metricActivationDepsChange(const StatsdConfig& config,
@@ -437,7 +494,7 @@
return false;
}
-bool determineMetricUpdateStatus(
+optional<InvalidConfigReason> determineMetricUpdateStatus(
const StatsdConfig& config, const MessageLite& metric, const int64_t metricId,
const MetricType metricType, const set<int64_t>& matcherDependencies,
const set<int64_t>& conditionDependencies,
@@ -452,19 +509,21 @@
const auto& oldMetricProducerIt = oldMetricProducerMap.find(metricId);
if (oldMetricProducerIt == oldMetricProducerMap.end()) {
updateStatus = UPDATE_NEW;
- return true;
+ return nullopt;
}
// This is an existing metric, check if it has changed.
uint64_t metricHash;
- if (!getMetricProtoHash(config, metric, metricId, metricToActivationMap, metricHash)) {
- return false;
+ optional<InvalidConfigReason> invalidConfigReason =
+ getMetricProtoHash(config, metric, metricId, metricToActivationMap, metricHash);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
const sp<MetricProducer> oldMetricProducer = oldMetricProducers[oldMetricProducerIt->second];
if (oldMetricProducer->getMetricType() != metricType ||
oldMetricProducer->getProtoHash() != metricHash) {
updateStatus = UPDATE_REPLACE;
- return true;
+ return nullopt;
}
// Take intersections of the matchers/predicates/states that the metric
@@ -476,60 +535,59 @@
inserter(intersection, intersection.begin()));
if (intersection.size() > 0) {
updateStatus = UPDATE_REPLACE;
- return true;
+ return nullopt;
}
set_intersection(conditionDependencies.begin(), conditionDependencies.end(),
replacedConditions.begin(), replacedConditions.end(),
inserter(intersection, intersection.begin()));
if (intersection.size() > 0) {
updateStatus = UPDATE_REPLACE;
- return true;
+ return nullopt;
}
set_intersection(stateDependencies.begin(), stateDependencies.end(), replacedStates.begin(),
replacedStates.end(), inserter(intersection, intersection.begin()));
if (intersection.size() > 0) {
updateStatus = UPDATE_REPLACE;
- return true;
+ return nullopt;
}
for (const auto& metricConditionLink : conditionLinks) {
if (replacedConditions.find(metricConditionLink.condition()) != replacedConditions.end()) {
updateStatus = UPDATE_REPLACE;
- return true;
+ return nullopt;
}
}
if (metricActivationDepsChange(config, metricToActivationMap, metricId, replacedMatchers)) {
updateStatus = UPDATE_REPLACE;
- return true;
+ return nullopt;
}
updateStatus = UPDATE_PRESERVE;
- return true;
+ return nullopt;
}
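// Reduced to its essentials, determineMetricUpdateStatus above answers one question: can
// the old producer be preserved? It cannot if the metric is new, its proto hash or type
// changed, or any matcher/condition/state it depends on was itself replaced. A
// self-contained model of that decision (decide(), deps, and replaced are illustrative names):
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <set>

enum class Decision { kPreserve, kReplace, kNew };

Decision decide(bool existedBefore, bool sameTypeAndHash, const std::set<int64_t>& deps,
                const std::set<int64_t>& replaced) {
    if (!existedBefore) return Decision::kNew;
    if (!sameTypeAndHash) return Decision::kReplace;
    std::set<int64_t> overlap;
    std::set_intersection(deps.begin(), deps.end(), replaced.begin(), replaced.end(),
                          std::inserter(overlap, overlap.begin()));
    return overlap.empty() ? Decision::kPreserve : Decision::kReplace;
}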
-bool determineAllMetricUpdateStatuses(const StatsdConfig& config,
- const unordered_map<int64_t, int>& oldMetricProducerMap,
- const vector<sp<MetricProducer>>& oldMetricProducers,
- const unordered_map<int64_t, int>& metricToActivationMap,
- const set<int64_t>& replacedMatchers,
- const set<int64_t>& replacedConditions,
- const set<int64_t>& replacedStates,
- vector<UpdateStatus>& metricsToUpdate) {
+optional<InvalidConfigReason> determineAllMetricUpdateStatuses(
+ const StatsdConfig& config, const unordered_map<int64_t, int>& oldMetricProducerMap,
+ const vector<sp<MetricProducer>>& oldMetricProducers,
+ const unordered_map<int64_t, int>& metricToActivationMap,
+ const set<int64_t>& replacedMatchers, const set<int64_t>& replacedConditions,
+ const set<int64_t>& replacedStates, vector<UpdateStatus>& metricsToUpdate) {
int metricIndex = 0;
+ optional<InvalidConfigReason> invalidConfigReason;
for (int i = 0; i < config.count_metric_size(); i++, metricIndex++) {
const CountMetric& metric = config.count_metric(i);
set<int64_t> conditionDependencies;
if (metric.has_condition()) {
conditionDependencies.insert(metric.condition());
}
- if (!determineMetricUpdateStatus(
- config, metric, metric.id(), METRIC_TYPE_COUNT, {metric.what()},
- conditionDependencies, metric.slice_by_state(), metric.links(),
- oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
- replacedMatchers, replacedConditions, replacedStates,
- metricsToUpdate[metricIndex])) {
- return false;
+ invalidConfigReason = determineMetricUpdateStatus(
+ config, metric, metric.id(), METRIC_TYPE_COUNT, {metric.what()},
+ conditionDependencies, metric.slice_by_state(), metric.links(),
+ oldMetricProducerMap, oldMetricProducers, metricToActivationMap, replacedMatchers,
+ replacedConditions, replacedStates, metricsToUpdate[metricIndex]);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
}
for (int i = 0; i < config.duration_metric_size(); i++, metricIndex++) {
@@ -538,13 +596,13 @@
if (metric.has_condition()) {
conditionDependencies.insert(metric.condition());
}
- if (!determineMetricUpdateStatus(
- config, metric, metric.id(), METRIC_TYPE_DURATION, /*matcherDependencies=*/{},
- conditionDependencies, metric.slice_by_state(), metric.links(),
- oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
- replacedMatchers, replacedConditions, replacedStates,
- metricsToUpdate[metricIndex])) {
- return false;
+ invalidConfigReason = determineMetricUpdateStatus(
+ config, metric, metric.id(), METRIC_TYPE_DURATION, /*matcherDependencies=*/{},
+ conditionDependencies, metric.slice_by_state(), metric.links(),
+ oldMetricProducerMap, oldMetricProducers, metricToActivationMap, replacedMatchers,
+ replacedConditions, replacedStates, metricsToUpdate[metricIndex]);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
}
for (int i = 0; i < config.event_metric_size(); i++, metricIndex++) {
@@ -553,13 +611,13 @@
if (metric.has_condition()) {
conditionDependencies.insert(metric.condition());
}
- if (!determineMetricUpdateStatus(
- config, metric, metric.id(), METRIC_TYPE_EVENT, {metric.what()},
- conditionDependencies, ::google::protobuf::RepeatedField<int64_t>(),
- metric.links(), oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
- replacedMatchers, replacedConditions, replacedStates,
- metricsToUpdate[metricIndex])) {
- return false;
+ invalidConfigReason = determineMetricUpdateStatus(
+ config, metric, metric.id(), METRIC_TYPE_EVENT, {metric.what()},
+ conditionDependencies, ::google::protobuf::RepeatedField<int64_t>(), metric.links(),
+ oldMetricProducerMap, oldMetricProducers, metricToActivationMap, replacedMatchers,
+ replacedConditions, replacedStates, metricsToUpdate[metricIndex]);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
}
for (int i = 0; i < config.value_metric_size(); i++, metricIndex++) {
@@ -568,13 +626,13 @@
if (metric.has_condition()) {
conditionDependencies.insert(metric.condition());
}
- if (!determineMetricUpdateStatus(
- config, metric, metric.id(), METRIC_TYPE_VALUE, {metric.what()},
- conditionDependencies, metric.slice_by_state(), metric.links(),
- oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
- replacedMatchers, replacedConditions, replacedStates,
- metricsToUpdate[metricIndex])) {
- return false;
+ invalidConfigReason = determineMetricUpdateStatus(
+ config, metric, metric.id(), METRIC_TYPE_VALUE, {metric.what()},
+ conditionDependencies, metric.slice_by_state(), metric.links(),
+ oldMetricProducerMap, oldMetricProducers, metricToActivationMap, replacedMatchers,
+ replacedConditions, replacedStates, metricsToUpdate[metricIndex]);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
}
for (int i = 0; i < config.gauge_metric_size(); i++, metricIndex++) {
@@ -587,13 +645,13 @@
if (metric.has_trigger_event()) {
matcherDependencies.insert(metric.trigger_event());
}
- if (!determineMetricUpdateStatus(
- config, metric, metric.id(), METRIC_TYPE_GAUGE, matcherDependencies,
- conditionDependencies, ::google::protobuf::RepeatedField<int64_t>(),
- metric.links(), oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
- replacedMatchers, replacedConditions, replacedStates,
- metricsToUpdate[metricIndex])) {
- return false;
+ invalidConfigReason = determineMetricUpdateStatus(
+ config, metric, metric.id(), METRIC_TYPE_GAUGE, matcherDependencies,
+ conditionDependencies, ::google::protobuf::RepeatedField<int64_t>(), metric.links(),
+ oldMetricProducerMap, oldMetricProducers, metricToActivationMap, replacedMatchers,
+ replacedConditions, replacedStates, metricsToUpdate[metricIndex]);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
}
@@ -603,17 +661,17 @@
if (metric.has_condition()) {
conditionDependencies.insert(metric.condition());
}
- if (!determineMetricUpdateStatus(
- config, metric, metric.id(), METRIC_TYPE_KLL, {metric.what()},
- conditionDependencies, metric.slice_by_state(), metric.links(),
- oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
- replacedMatchers, replacedConditions, replacedStates,
- metricsToUpdate[metricIndex])) {
- return false;
+ invalidConfigReason = determineMetricUpdateStatus(
+ config, metric, metric.id(), METRIC_TYPE_KLL, {metric.what()},
+ conditionDependencies, metric.slice_by_state(), metric.links(),
+ oldMetricProducerMap, oldMetricProducers, metricToActivationMap, replacedMatchers,
+ replacedConditions, replacedStates, metricsToUpdate[metricIndex]);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
}
- return true;
+ return nullopt;
}
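// The single metricIndex above advances across all six metric types in one fixed order
// (count, duration, event, value, gauge, kll), so metricsToUpdate is one flat vector
// whose length is the sum of the per-type counts. Sketch of that invariant, reusing the
// StatsdConfig proto that the rest of this file operates on:
static int totalMetricCount(const StatsdConfig& config) {
    return config.count_metric_size() + config.duration_metric_size() +
           config.event_metric_size() + config.value_metric_size() +
           config.gauge_metric_size() + config.kll_metric_size();
}
// updateMetrics computes the same sum as allMetricsCount; both functions must also walk
// the metric types in the same order for metricsToUpdate[metricIndex] to stay aligned.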
// Called when a metric is preserved during a config update. Finds the metric in oldMetricProducers
@@ -633,56 +691,64 @@
unordered_map<int, vector<int>>& conditionToMetricMap,
unordered_map<int, vector<int>>& activationAtomTrackerToMetricMap,
unordered_map<int, vector<int>>& deactivationAtomTrackerToMetricMap,
- vector<int>& metricsWithActivation) {
+ vector<int>& metricsWithActivation, optional<InvalidConfigReason>& invalidConfigReason) {
const auto& oldMetricProducerIt = oldMetricProducerMap.find(metricId);
if (oldMetricProducerIt == oldMetricProducerMap.end()) {
ALOGE("Could not find Metric %lld in the previous config, but expected it "
"to be there",
(long long)metricId);
+ invalidConfigReason =
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_NOT_IN_PREV_CONFIG, metricId);
return nullopt;
}
const int oldIndex = oldMetricProducerIt->second;
sp<MetricProducer> producer = oldMetricProducers[oldIndex];
- if (!producer->onConfigUpdated(config, configIndex, metricIndex, allAtomMatchingTrackers,
- oldAtomMatchingTrackerMap, newAtomMatchingTrackerMap,
- matcherWizard, allConditionTrackers, conditionTrackerMap, wizard,
- metricToActivationMap, trackerToMetricMap, conditionToMetricMap,
- activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, metricsWithActivation)) {
+ invalidConfigReason = producer->onConfigUpdated(
+ config, configIndex, metricIndex, allAtomMatchingTrackers, oldAtomMatchingTrackerMap,
+ newAtomMatchingTrackerMap, matcherWizard, allConditionTrackers, conditionTrackerMap,
+ wizard, metricToActivationMap, trackerToMetricMap, conditionToMetricMap,
+ activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
+ metricsWithActivation);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
return {producer};
}
-bool updateMetrics(const ConfigKey& key, const StatsdConfig& config, const int64_t timeBaseNs,
- const int64_t currentTimeNs, const sp<StatsPullerManager>& pullerManager,
- const unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
- const unordered_map<int64_t, int>& newAtomMatchingTrackerMap,
- const set<int64_t>& replacedMatchers,
- const vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
- const unordered_map<int64_t, int>& conditionTrackerMap,
- const set<int64_t>& replacedConditions,
- vector<sp<ConditionTracker>>& allConditionTrackers,
- const vector<ConditionState>& initialConditionCache,
- const unordered_map<int64_t, int>& stateAtomIdMap,
- const unordered_map<int64_t, unordered_map<int, int64_t>>& allStateGroupMaps,
- const set<int64_t>& replacedStates,
- const unordered_map<int64_t, int>& oldMetricProducerMap,
- const vector<sp<MetricProducer>>& oldMetricProducers,
- unordered_map<int64_t, int>& newMetricProducerMap,
- vector<sp<MetricProducer>>& newMetricProducers,
- unordered_map<int, vector<int>>& conditionToMetricMap,
- unordered_map<int, vector<int>>& trackerToMetricMap,
- set<int64_t>& noReportMetricIds,
- unordered_map<int, vector<int>>& activationAtomTrackerToMetricMap,
- unordered_map<int, vector<int>>& deactivationAtomTrackerToMetricMap,
- vector<int>& metricsWithActivation, set<int64_t>& replacedMetrics) {
+optional<InvalidConfigReason> updateMetrics(
+ const ConfigKey& key, const StatsdConfig& config, const int64_t timeBaseNs,
+ const int64_t currentTimeNs, const sp<StatsPullerManager>& pullerManager,
+ const unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
+ const unordered_map<int64_t, int>& newAtomMatchingTrackerMap,
+ const set<int64_t>& replacedMatchers,
+ const vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
+ const unordered_map<int64_t, int>& conditionTrackerMap,
+ const set<int64_t>& replacedConditions, vector<sp<ConditionTracker>>& allConditionTrackers,
+ const vector<ConditionState>& initialConditionCache,
+ const unordered_map<int64_t, int>& stateAtomIdMap,
+ const unordered_map<int64_t, unordered_map<int, int64_t>>& allStateGroupMaps,
+ const set<int64_t>& replacedStates, const unordered_map<int64_t, int>& oldMetricProducerMap,
+ const vector<sp<MetricProducer>>& oldMetricProducers,
+ unordered_map<int64_t, int>& newMetricProducerMap,
+ vector<sp<MetricProducer>>& newMetricProducers,
+ unordered_map<int, vector<int>>& conditionToMetricMap,
+ unordered_map<int, vector<int>>& trackerToMetricMap, set<int64_t>& noReportMetricIds,
+ unordered_map<int, vector<int>>& activationAtomTrackerToMetricMap,
+ unordered_map<int, vector<int>>& deactivationAtomTrackerToMetricMap,
+ vector<int>& metricsWithActivation, set<int64_t>& replacedMetrics) {
sp<ConditionWizard> wizard = new ConditionWizard(allConditionTrackers);
sp<EventMatcherWizard> matcherWizard = new EventMatcherWizard(allAtomMatchingTrackers);
const int allMetricsCount = config.count_metric_size() + config.duration_metric_size() +
config.event_metric_size() + config.gauge_metric_size() +
config.value_metric_size() + config.kll_metric_size();
newMetricProducers.reserve(allMetricsCount);
+ optional<InvalidConfigReason> invalidConfigReason;
+
+ if (config.has_restricted_metrics_delegate_package_name() &&
+ allMetricsCount != config.event_metric_size()) {
+ ALOGE("Restricted metrics only support event metric");
+ return InvalidConfigReason(INVALID_CONFIG_REASON_RESTRICTED_METRIC_NOT_SUPPORTED);
+ }
// Construct map from metric id to metric activation index. The map will be used to determine
// the metric activation corresponding to a metric.
@@ -692,16 +758,18 @@
int64_t metricId = metricActivation.metric_id();
if (metricToActivationMap.find(metricId) != metricToActivationMap.end()) {
ALOGE("Metric %lld has multiple MetricActivations", (long long)metricId);
- return false;
+ return InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_HAS_MULTIPLE_ACTIVATIONS,
+ metricId);
}
metricToActivationMap.insert({metricId, i});
}
vector<UpdateStatus> metricsToUpdate(allMetricsCount, UPDATE_UNKNOWN);
- if (!determineAllMetricUpdateStatuses(config, oldMetricProducerMap, oldMetricProducers,
- metricToActivationMap, replacedMatchers,
- replacedConditions, replacedStates, metricsToUpdate)) {
- return false;
+ invalidConfigReason = determineAllMetricUpdateStatuses(
+ config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
+ replacedMatchers, replacedConditions, replacedStates, metricsToUpdate);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
// Now, perform the update. Must iterate the metric types in the same order
@@ -718,7 +786,8 @@
allConditionTrackers, conditionTrackerMap, wizard, oldMetricProducerMap,
oldMetricProducers, metricToActivationMap, trackerToMetricMap,
conditionToMetricMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, metricsWithActivation);
+ deactivationAtomTrackerToMetricMap, metricsWithActivation,
+ invalidConfigReason);
break;
}
case UPDATE_REPLACE:
@@ -731,17 +800,19 @@
conditionTrackerMap, initialConditionCache, wizard, stateAtomIdMap,
allStateGroupMaps, metricToActivationMap, trackerToMetricMap,
conditionToMetricMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, metricsWithActivation);
+ deactivationAtomTrackerToMetricMap, metricsWithActivation,
+ invalidConfigReason);
break;
}
default: {
ALOGE("Metric \"%lld\" update state is unknown. This should never happen",
(long long)metric.id());
- return false;
+ return InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_UPDATE_STATUS_UNKNOWN,
+ metric.id());
}
}
if (!producer) {
- return false;
+ return invalidConfigReason;
}
newMetricProducers.push_back(producer.value());
}
@@ -757,7 +828,8 @@
allConditionTrackers, conditionTrackerMap, wizard, oldMetricProducerMap,
oldMetricProducers, metricToActivationMap, trackerToMetricMap,
conditionToMetricMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, metricsWithActivation);
+ deactivationAtomTrackerToMetricMap, metricsWithActivation,
+ invalidConfigReason);
break;
}
case UPDATE_REPLACE:
@@ -770,17 +842,19 @@
conditionTrackerMap, initialConditionCache, wizard, stateAtomIdMap,
allStateGroupMaps, metricToActivationMap, trackerToMetricMap,
conditionToMetricMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, metricsWithActivation);
+ deactivationAtomTrackerToMetricMap, metricsWithActivation,
+ invalidConfigReason);
break;
}
default: {
ALOGE("Metric \"%lld\" update state is unknown. This should never happen",
(long long)metric.id());
- return false;
+ return InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_UPDATE_STATUS_UNKNOWN,
+ metric.id());
}
}
if (!producer) {
- return false;
+ return invalidConfigReason;
}
newMetricProducers.push_back(producer.value());
}
@@ -796,7 +870,8 @@
allConditionTrackers, conditionTrackerMap, wizard, oldMetricProducerMap,
oldMetricProducers, metricToActivationMap, trackerToMetricMap,
conditionToMetricMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, metricsWithActivation);
+ deactivationAtomTrackerToMetricMap, metricsWithActivation,
+ invalidConfigReason);
break;
}
case UPDATE_REPLACE:
@@ -808,17 +883,19 @@
newAtomMatchingTrackerMap, allConditionTrackers, conditionTrackerMap,
initialConditionCache, wizard, metricToActivationMap, trackerToMetricMap,
conditionToMetricMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, metricsWithActivation);
+ deactivationAtomTrackerToMetricMap, metricsWithActivation,
+ invalidConfigReason);
break;
}
default: {
ALOGE("Metric \"%lld\" update state is unknown. This should never happen",
(long long)metric.id());
- return false;
+ return InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_UPDATE_STATUS_UNKNOWN,
+ metric.id());
}
}
if (!producer) {
- return false;
+ return invalidConfigReason;
}
newMetricProducers.push_back(producer.value());
}
@@ -835,7 +912,8 @@
allConditionTrackers, conditionTrackerMap, wizard, oldMetricProducerMap,
oldMetricProducers, metricToActivationMap, trackerToMetricMap,
conditionToMetricMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, metricsWithActivation);
+ deactivationAtomTrackerToMetricMap, metricsWithActivation,
+ invalidConfigReason);
break;
}
case UPDATE_REPLACE:
@@ -848,17 +926,19 @@
conditionTrackerMap, initialConditionCache, wizard, matcherWizard,
stateAtomIdMap, allStateGroupMaps, metricToActivationMap,
trackerToMetricMap, conditionToMetricMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, metricsWithActivation);
+ deactivationAtomTrackerToMetricMap, metricsWithActivation,
+ invalidConfigReason);
break;
}
default: {
ALOGE("Metric \"%lld\" update state is unknown. This should never happen",
(long long)metric.id());
- return false;
+ return InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_UPDATE_STATUS_UNKNOWN,
+ metric.id());
}
}
if (!producer) {
- return false;
+ return invalidConfigReason;
}
newMetricProducers.push_back(producer.value());
}
@@ -875,7 +955,8 @@
allConditionTrackers, conditionTrackerMap, wizard, oldMetricProducerMap,
oldMetricProducers, metricToActivationMap, trackerToMetricMap,
conditionToMetricMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, metricsWithActivation);
+ deactivationAtomTrackerToMetricMap, metricsWithActivation,
+ invalidConfigReason);
break;
}
case UPDATE_REPLACE:
@@ -888,17 +969,18 @@
conditionTrackerMap, initialConditionCache, wizard, matcherWizard,
metricToActivationMap, trackerToMetricMap, conditionToMetricMap,
activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
- metricsWithActivation);
+ metricsWithActivation, invalidConfigReason);
break;
}
default: {
ALOGE("Metric \"%lld\" update state is unknown. This should never happen",
(long long)metric.id());
- return false;
+ return InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_UPDATE_STATUS_UNKNOWN,
+ metric.id());
}
}
if (!producer) {
- return false;
+ return invalidConfigReason;
}
newMetricProducers.push_back(producer.value());
}
@@ -915,7 +997,8 @@
allConditionTrackers, conditionTrackerMap, wizard, oldMetricProducerMap,
oldMetricProducers, metricToActivationMap, trackerToMetricMap,
conditionToMetricMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, metricsWithActivation);
+ deactivationAtomTrackerToMetricMap, metricsWithActivation,
+ invalidConfigReason);
break;
}
case UPDATE_REPLACE:
@@ -929,17 +1012,19 @@
conditionTrackerMap, initialConditionCache, wizard, matcherWizard,
stateAtomIdMap, allStateGroupMaps, metricToActivationMap,
trackerToMetricMap, conditionToMetricMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, metricsWithActivation);
+ deactivationAtomTrackerToMetricMap, metricsWithActivation,
+ invalidConfigReason);
break;
}
default: {
ALOGE("Metric \"%lld\" update state is unknown. This should never happen",
(long long)metric.id());
- return false;
+ return InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_UPDATE_STATUS_UNKNOWN,
+ metric.id());
}
}
if (!producer) {
- return false;
+ return invalidConfigReason;
}
newMetricProducers.push_back(producer.value());
}
@@ -948,7 +1033,8 @@
const int64_t noReportMetric = config.no_report_metric(i);
if (newMetricProducerMap.find(noReportMetric) == newMetricProducerMap.end()) {
ALOGW("no_report_metric %" PRId64 " not exist", noReportMetric);
- return false;
+ return InvalidConfigReason(INVALID_CONFIG_REASON_NO_REPORT_METRIC_NOT_FOUND,
+ noReportMetric);
}
noReportMetricIds.insert(noReportMetric);
}
@@ -962,7 +1048,9 @@
// Using atoms allowed from any uid as a sliced state atom is not allowed.
// Redo this check for all metrics in case the atoms allowed from any uid changed.
if (atomsAllowedFromAnyUid.find(atomId) != atomsAllowedFromAnyUid.end()) {
- return false;
+ return InvalidConfigReason(
+ INVALID_CONFIG_REASON_METRIC_SLICED_STATE_ATOM_ALLOWED_FROM_ANY_UID,
+ producer->getMetricId());
            // Preserved metrics should've already registered.
} else if (metricsToUpdate[i] != UPDATE_PRESERVE) {
StateManager::getInstance().registerListener(atomId, producer);
@@ -976,62 +1064,75 @@
newMetricProducers[i]->prepareFirstBucket();
}
}
- return true;
+
+ for (const sp<MetricProducer>& oldMetricProducer : oldMetricProducers) {
+ const auto& it = newMetricProducerMap.find(oldMetricProducer->getMetricId());
+ // Consider metric removed if it's not present in newMetricProducerMap or it's replaced.
+ if (it == newMetricProducerMap.end() ||
+ replacedMetrics.find(oldMetricProducer->getMetricId()) != replacedMetrics.end()) {
+ oldMetricProducer->onMetricRemove();
+ }
+ }
+ return nullopt;
}
-bool determineAlertUpdateStatus(const Alert& alert,
- const unordered_map<int64_t, int>& oldAlertTrackerMap,
- const vector<sp<AnomalyTracker>>& oldAnomalyTrackers,
- const set<int64_t>& replacedMetrics, UpdateStatus& updateStatus) {
+optional<InvalidConfigReason> determineAlertUpdateStatus(
+ const Alert& alert, const unordered_map<int64_t, int>& oldAlertTrackerMap,
+ const vector<sp<AnomalyTracker>>& oldAnomalyTrackers, const set<int64_t>& replacedMetrics,
+ UpdateStatus& updateStatus) {
// Check if new alert.
const auto& oldAnomalyTrackerIt = oldAlertTrackerMap.find(alert.id());
if (oldAnomalyTrackerIt == oldAlertTrackerMap.end()) {
updateStatus = UPDATE_NEW;
- return true;
+ return nullopt;
}
// This is an existing alert, check if it has changed.
string serializedAlert;
if (!alert.SerializeToString(&serializedAlert)) {
ALOGW("Unable to serialize alert %lld", (long long)alert.id());
- return false;
+ return createInvalidConfigReasonWithAlert(INVALID_CONFIG_REASON_ALERT_SERIALIZATION_FAILED,
+ alert.id());
}
uint64_t newProtoHash = Hash64(serializedAlert);
- const auto [success, oldProtoHash] =
+ const auto [invalidConfigReason, oldProtoHash] =
oldAnomalyTrackers[oldAnomalyTrackerIt->second]->getProtoHash();
- if (!success) {
- return false;
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
if (newProtoHash != oldProtoHash) {
updateStatus = UPDATE_REPLACE;
- return true;
+ return nullopt;
}
// Check if the metric this alert relies on has changed.
if (replacedMetrics.find(alert.metric_id()) != replacedMetrics.end()) {
updateStatus = UPDATE_REPLACE;
- return true;
+ return nullopt;
}
updateStatus = UPDATE_PRESERVE;
- return true;
+ return nullopt;
}
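// Why a proto hash drives the preserve/replace choice above: only the previous config's
// 64-bit hash survives the update (via getProtoHash()), so the new Alert is re-serialized
// and its hash compared against the stored value; any byte-level change forces
// UPDATE_REPLACE. Self-contained illustration, with std::hash standing in for the Hash64
// helper used by the real code:
#include <cstdint>
#include <functional>
#include <string>

bool protoChanged(uint64_t storedOldHash, const std::string& newSerializedProto) {
    const uint64_t newHash = std::hash<std::string>{}(newSerializedProto);
    return newHash != storedOldHash;  // mismatch: rebuild the tracker instead of preserving it
}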
-bool updateAlerts(const StatsdConfig& config, const int64_t currentTimeNs,
- const unordered_map<int64_t, int>& metricProducerMap,
- const set<int64_t>& replacedMetrics,
- const unordered_map<int64_t, int>& oldAlertTrackerMap,
- const vector<sp<AnomalyTracker>>& oldAnomalyTrackers,
- const sp<AlarmMonitor>& anomalyAlarmMonitor,
- vector<sp<MetricProducer>>& allMetricProducers,
- unordered_map<int64_t, int>& newAlertTrackerMap,
- vector<sp<AnomalyTracker>>& newAnomalyTrackers) {
+optional<InvalidConfigReason> updateAlerts(const StatsdConfig& config, const int64_t currentTimeNs,
+ const unordered_map<int64_t, int>& metricProducerMap,
+ const set<int64_t>& replacedMetrics,
+ const unordered_map<int64_t, int>& oldAlertTrackerMap,
+ const vector<sp<AnomalyTracker>>& oldAnomalyTrackers,
+ const sp<AlarmMonitor>& anomalyAlarmMonitor,
+ vector<sp<MetricProducer>>& allMetricProducers,
+ unordered_map<int64_t, int>& newAlertTrackerMap,
+ vector<sp<AnomalyTracker>>& newAnomalyTrackers) {
int alertCount = config.alert_size();
vector<UpdateStatus> alertUpdateStatuses(alertCount);
+ optional<InvalidConfigReason> invalidConfigReason;
for (int i = 0; i < alertCount; i++) {
- if (!determineAlertUpdateStatus(config.alert(i), oldAlertTrackerMap, oldAnomalyTrackers,
- replacedMetrics, alertUpdateStatuses[i])) {
- return false;
+ invalidConfigReason =
+ determineAlertUpdateStatus(config.alert(i), oldAlertTrackerMap, oldAnomalyTrackers,
+ replacedMetrics, alertUpdateStatuses[i]);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
}
@@ -1046,7 +1147,8 @@
ALOGW("Could not find AnomalyTracker %lld in the previous config, but "
"expected it to be there",
(long long)alert.id());
- return false;
+ return createInvalidConfigReasonWithAlert(
+ INVALID_CONFIG_REASON_ALERT_NOT_IN_PREV_CONFIG, alert.id());
}
sp<AnomalyTracker> anomalyTracker = oldAnomalyTrackers[oldAnomalyTrackerIt->second];
anomalyTracker->onConfigUpdated();
@@ -1055,7 +1157,9 @@
if (metricProducerIt == metricProducerMap.end()) {
ALOGW("alert \"%lld\" has unknown metric id: \"%lld\"", (long long)alert.id(),
(long long)alert.metric_id());
- return false;
+ return createInvalidConfigReasonWithAlert(
+ INVALID_CONFIG_REASON_ALERT_METRIC_NOT_FOUND, alert.metric_id(),
+ alert.id());
}
allMetricProducers[metricProducerIt->second]->addAnomalyTracker(anomalyTracker,
currentTimeNs);
@@ -1064,11 +1168,11 @@
}
case UPDATE_REPLACE:
case UPDATE_NEW: {
- optional<sp<AnomalyTracker>> anomalyTracker =
- createAnomalyTracker(alert, anomalyAlarmMonitor, alertUpdateStatuses[i],
- currentTimeNs, metricProducerMap, allMetricProducers);
+ optional<sp<AnomalyTracker>> anomalyTracker = createAnomalyTracker(
+ alert, anomalyAlarmMonitor, alertUpdateStatuses[i], currentTimeNs,
+ metricProducerMap, allMetricProducers, invalidConfigReason);
if (!anomalyTracker) {
- return false;
+ return invalidConfigReason;
}
newAnomalyTrackers.push_back(anomalyTracker.value());
break;
@@ -1076,48 +1180,49 @@
default: {
ALOGE("Alert \"%lld\" update state is unknown. This should never happen",
(long long)alert.id());
- return false;
+ return createInvalidConfigReasonWithAlert(
+ INVALID_CONFIG_REASON_ALERT_UPDATE_STATUS_UNKNOWN, alert.id());
}
}
}
- if (!initSubscribersForSubscriptionType(config, Subscription::ALERT, newAlertTrackerMap,
- newAnomalyTrackers)) {
- return false;
+ invalidConfigReason = initSubscribersForSubscriptionType(
+ config, Subscription::ALERT, newAlertTrackerMap, newAnomalyTrackers);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
- return true;
+ return nullopt;
}
-bool updateStatsdConfig(const ConfigKey& key, const StatsdConfig& config, const sp<UidMap>& uidMap,
- const sp<StatsPullerManager>& pullerManager,
- const sp<AlarmMonitor>& anomalyAlarmMonitor,
- const sp<AlarmMonitor>& periodicAlarmMonitor, const int64_t timeBaseNs,
- const int64_t currentTimeNs,
- const vector<sp<AtomMatchingTracker>>& oldAtomMatchingTrackers,
- const unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
- const vector<sp<ConditionTracker>>& oldConditionTrackers,
- const unordered_map<int64_t, int>& oldConditionTrackerMap,
- const vector<sp<MetricProducer>>& oldMetricProducers,
- const unordered_map<int64_t, int>& oldMetricProducerMap,
- const vector<sp<AnomalyTracker>>& oldAnomalyTrackers,
- const unordered_map<int64_t, int>& oldAlertTrackerMap,
- const map<int64_t, uint64_t>& oldStateProtoHashes, set<int>& allTagIds,
- vector<sp<AtomMatchingTracker>>& newAtomMatchingTrackers,
- unordered_map<int64_t, int>& newAtomMatchingTrackerMap,
- vector<sp<ConditionTracker>>& newConditionTrackers,
- unordered_map<int64_t, int>& newConditionTrackerMap,
- vector<sp<MetricProducer>>& newMetricProducers,
- unordered_map<int64_t, int>& newMetricProducerMap,
- vector<sp<AnomalyTracker>>& newAnomalyTrackers,
- unordered_map<int64_t, int>& newAlertTrackerMap,
- vector<sp<AlarmTracker>>& newPeriodicAlarmTrackers,
- unordered_map<int, vector<int>>& conditionToMetricMap,
- unordered_map<int, vector<int>>& trackerToMetricMap,
- unordered_map<int, vector<int>>& trackerToConditionMap,
- unordered_map<int, vector<int>>& activationTrackerToMetricMap,
- unordered_map<int, vector<int>>& deactivationTrackerToMetricMap,
- vector<int>& metricsWithActivation,
- map<int64_t, uint64_t>& newStateProtoHashes,
- set<int64_t>& noReportMetricIds) {
+optional<InvalidConfigReason> updateStatsdConfig(
+ const ConfigKey& key, const StatsdConfig& config, const sp<UidMap>& uidMap,
+ const sp<StatsPullerManager>& pullerManager, const sp<AlarmMonitor>& anomalyAlarmMonitor,
+ const sp<AlarmMonitor>& periodicAlarmMonitor, const int64_t timeBaseNs,
+ const int64_t currentTimeNs, const vector<sp<AtomMatchingTracker>>& oldAtomMatchingTrackers,
+ const unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
+ const vector<sp<ConditionTracker>>& oldConditionTrackers,
+ const unordered_map<int64_t, int>& oldConditionTrackerMap,
+ const vector<sp<MetricProducer>>& oldMetricProducers,
+ const unordered_map<int64_t, int>& oldMetricProducerMap,
+ const vector<sp<AnomalyTracker>>& oldAnomalyTrackers,
+ const unordered_map<int64_t, int>& oldAlertTrackerMap,
+ const map<int64_t, uint64_t>& oldStateProtoHashes,
+ std::unordered_map<int, std::vector<int>>& allTagIdsToMatchersMap,
+ vector<sp<AtomMatchingTracker>>& newAtomMatchingTrackers,
+ unordered_map<int64_t, int>& newAtomMatchingTrackerMap,
+ vector<sp<ConditionTracker>>& newConditionTrackers,
+ unordered_map<int64_t, int>& newConditionTrackerMap,
+ vector<sp<MetricProducer>>& newMetricProducers,
+ unordered_map<int64_t, int>& newMetricProducerMap,
+ vector<sp<AnomalyTracker>>& newAnomalyTrackers,
+ unordered_map<int64_t, int>& newAlertTrackerMap,
+ vector<sp<AlarmTracker>>& newPeriodicAlarmTrackers,
+ unordered_map<int, vector<int>>& conditionToMetricMap,
+ unordered_map<int, vector<int>>& trackerToMetricMap,
+ unordered_map<int, vector<int>>& trackerToConditionMap,
+ unordered_map<int, vector<int>>& activationTrackerToMetricMap,
+ unordered_map<int, vector<int>>& deactivationTrackerToMetricMap,
+ vector<int>& metricsWithActivation, map<int64_t, uint64_t>& newStateProtoHashes,
+ set<int64_t>& noReportMetricIds) {
set<int64_t> replacedMatchers;
set<int64_t> replacedConditions;
set<int64_t> replacedStates;
@@ -1129,55 +1234,63 @@
if (config.package_certificate_hash_size_bytes() > UINT8_MAX) {
ALOGE("Invalid value for package_certificate_hash_size_bytes: %d",
config.package_certificate_hash_size_bytes());
- return false;
+ return InvalidConfigReason(INVALID_CONFIG_REASON_PACKAGE_CERT_HASH_SIZE_TOO_LARGE);
}
- if (!updateAtomMatchingTrackers(config, uidMap, oldAtomMatchingTrackerMap,
- oldAtomMatchingTrackers, allTagIds, newAtomMatchingTrackerMap,
- newAtomMatchingTrackers, replacedMatchers)) {
+ optional<InvalidConfigReason> invalidConfigReason = updateAtomMatchingTrackers(
+ config, uidMap, oldAtomMatchingTrackerMap, oldAtomMatchingTrackers,
+ allTagIdsToMatchersMap, newAtomMatchingTrackerMap, newAtomMatchingTrackers,
+ replacedMatchers);
+ if (invalidConfigReason.has_value()) {
ALOGE("updateAtomMatchingTrackers failed");
- return false;
+ return invalidConfigReason;
}
- if (!updateConditions(key, config, newAtomMatchingTrackerMap, replacedMatchers,
- oldConditionTrackerMap, oldConditionTrackers, newConditionTrackerMap,
- newConditionTrackers, trackerToConditionMap, conditionCache,
- replacedConditions)) {
+ invalidConfigReason = updateConditions(
+ key, config, newAtomMatchingTrackerMap, replacedMatchers, oldConditionTrackerMap,
+ oldConditionTrackers, newConditionTrackerMap, newConditionTrackers,
+ trackerToConditionMap, conditionCache, replacedConditions);
+ if (invalidConfigReason.has_value()) {
ALOGE("updateConditions failed");
- return false;
+ return invalidConfigReason;
}
- if (!updateStates(config, oldStateProtoHashes, stateAtomIdMap, allStateGroupMaps,
- newStateProtoHashes, replacedStates)) {
+ invalidConfigReason = updateStates(config, oldStateProtoHashes, stateAtomIdMap,
+ allStateGroupMaps, newStateProtoHashes, replacedStates);
+ if (invalidConfigReason.has_value()) {
ALOGE("updateStates failed");
- return false;
+ return invalidConfigReason;
}
- if (!updateMetrics(key, config, timeBaseNs, currentTimeNs, pullerManager,
- oldAtomMatchingTrackerMap, newAtomMatchingTrackerMap, replacedMatchers,
- newAtomMatchingTrackers, newConditionTrackerMap, replacedConditions,
- newConditionTrackers, conditionCache, stateAtomIdMap, allStateGroupMaps,
- replacedStates, oldMetricProducerMap, oldMetricProducers,
- newMetricProducerMap, newMetricProducers, conditionToMetricMap,
- trackerToMetricMap, noReportMetricIds, activationTrackerToMetricMap,
- deactivationTrackerToMetricMap, metricsWithActivation, replacedMetrics)) {
+
+ invalidConfigReason = updateMetrics(
+ key, config, timeBaseNs, currentTimeNs, pullerManager, oldAtomMatchingTrackerMap,
+ newAtomMatchingTrackerMap, replacedMatchers, newAtomMatchingTrackers,
+ newConditionTrackerMap, replacedConditions, newConditionTrackers, conditionCache,
+ stateAtomIdMap, allStateGroupMaps, replacedStates, oldMetricProducerMap,
+ oldMetricProducers, newMetricProducerMap, newMetricProducers, conditionToMetricMap,
+ trackerToMetricMap, noReportMetricIds, activationTrackerToMetricMap,
+ deactivationTrackerToMetricMap, metricsWithActivation, replacedMetrics);
+ if (invalidConfigReason.has_value()) {
ALOGE("updateMetrics failed");
- return false;
+ return invalidConfigReason;
}
- if (!updateAlerts(config, currentTimeNs, newMetricProducerMap, replacedMetrics,
- oldAlertTrackerMap, oldAnomalyTrackers, anomalyAlarmMonitor,
- newMetricProducers, newAlertTrackerMap, newAnomalyTrackers)) {
+ invalidConfigReason = updateAlerts(config, currentTimeNs, newMetricProducerMap, replacedMetrics,
+ oldAlertTrackerMap, oldAnomalyTrackers, anomalyAlarmMonitor,
+ newMetricProducers, newAlertTrackerMap, newAnomalyTrackers);
+ if (invalidConfigReason.has_value()) {
ALOGE("updateAlerts failed");
- return false;
+ return invalidConfigReason;
}
+ invalidConfigReason = initAlarms(config, key, periodicAlarmMonitor, timeBaseNs, currentTimeNs,
+ newPeriodicAlarmTrackers);
// Alarms do not have any state, so we can reuse the initialization logic.
- if (!initAlarms(config, key, periodicAlarmMonitor, timeBaseNs, currentTimeNs,
- newPeriodicAlarmTrackers)) {
+ if (invalidConfigReason.has_value()) {
ALOGE("initAlarms failed");
- return false;
+ return invalidConfigReason;
}
- return true;
+ return nullopt;
}
} // namespace statsd
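// updateStatsdConfig above chains the stages in a fixed order (matchers, conditions,
// states, metrics, alerts, alarms) and returns the first reason any stage reports; later
// stages never run. The control-flow pattern, reduced to a self-contained sketch where a
// plain int stands in for InvalidConfigReason:
#include <functional>
#include <optional>
#include <vector>

std::optional<int> runStages(const std::vector<std::function<std::optional<int>()>>& stages) {
    for (const auto& stage : stages) {
        if (std::optional<int> reason = stage(); reason.has_value()) {
            return reason;  // first failure wins and is surfaced to the caller unchanged
        }
    }
    return std::nullopt;  // every stage succeeded
}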
diff --git a/statsd/src/metrics/parsing_utils/config_update_utils.h b/statsd/src/metrics/parsing_utils/config_update_utils.h
index 24f8c85..3bd1e80 100644
--- a/statsd/src/metrics/parsing_utils/config_update_utils.h
+++ b/statsd/src/metrics/parsing_utils/config_update_utils.h
@@ -44,8 +44,8 @@
// [matchersToUpdate]: vector of the update status of each matcher. The matcherIdx index will
// be updated from UPDATE_UNKNOWN after this call.
// [cycleTracker]: intermediate param used during recursion.
-// Returns whether the function was successful or not.
-bool determineMatcherUpdateStatus(
+// Returns nullopt if successful and InvalidConfigReason if not.
+optional<InvalidConfigReason> determineMatcherUpdateStatus(
const StatsdConfig& config, const int matcherIdx,
const std::unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
const std::vector<sp<AtomMatchingTracker>>& oldAtomMatchingTrackers,
@@ -58,17 +58,19 @@
// [oldAtomMatchingTrackerMap]: existing matcher id to index mapping
// [oldAtomMatchingTrackers]: stores the existing AtomMatchingTrackers
// output:
-// [allTagIds]: contains the set of all interesting tag ids to this config.
+// [allTagIdsToMatchersMap]: map from each atom tag id to the indices of the matchers that reference it
// [newAtomMatchingTrackerMap]: new matcher id to index mapping
// [newAtomMatchingTrackers]: stores the new AtomMatchingTrackers
// [replacedMatchers]: set of matcher ids that changed and have been replaced
-bool updateAtomMatchingTrackers(const StatsdConfig& config, const sp<UidMap>& uidMap,
- const std::unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
- const std::vector<sp<AtomMatchingTracker>>& oldAtomMatchingTrackers,
- std::set<int>& allTagIds,
- std::unordered_map<int64_t, int>& newAtomMatchingTrackerMap,
- std::vector<sp<AtomMatchingTracker>>& newAtomMatchingTrackers,
- std::set<int64_t>& replacedMatchers);
+// Returns nullopt if successful and InvalidConfigReason if not.
+optional<InvalidConfigReason> updateAtomMatchingTrackers(
+ const StatsdConfig& config, const sp<UidMap>& uidMap,
+ const std::unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
+ const std::vector<sp<AtomMatchingTracker>>& oldAtomMatchingTrackers,
+ std::unordered_map<int, std::vector<int>>& allTagIdsToMatchersMap,
+ std::unordered_map<int64_t, int>& newAtomMatchingTrackerMap,
+ std::vector<sp<AtomMatchingTracker>>& newAtomMatchingTrackers,
+ std::set<int64_t>& replacedMatchers);
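// Hedged sketch of the new output shape: allTagIdsToMatchersMap replaces the old flat
// std::set<int> of tag ids with a map from each atom tag id to the indices of the
// matchers that reference it, so the logging path can look up candidate matchers per tag
// directly. The helper names below are illustrative, not from the patch:
#include <unordered_map>
#include <vector>

using TagToMatchers = std::unordered_map<int, std::vector<int>>;

void addMatcherForTag(TagToMatchers& map, int tagId, int matcherIndex) {
    map[tagId].push_back(matcherIndex);  // one tag id can feed several matchers
}

const std::vector<int>* matchersForTag(const TagToMatchers& map, int tagId) {
    const auto it = map.find(tagId);
    return it == map.end() ? nullptr : &it->second;  // nullptr: no matcher cares about this tag
}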
// Recursive function to determine if a condition needs to be updated.
// input:
@@ -82,14 +84,14 @@
// [conditionsToUpdate]: vector of the update status of each condition. The conditionIdx index will
// be updated from UPDATE_UNKNOWN after this call.
// [cycleTracker]: intermediate param used during recursion.
-// Returns whether the function was successful or not.
-bool determineConditionUpdateStatus(const StatsdConfig& config, const int conditionIdx,
- const std::unordered_map<int64_t, int>& oldConditionTrackerMap,
- const std::vector<sp<ConditionTracker>>& oldConditionTrackers,
- const std::unordered_map<int64_t, int>& newConditionTrackerMap,
- const std::set<int64_t>& replacedMatchers,
- std::vector<UpdateStatus>& conditionsToUpdate,
- std::vector<bool>& cycleTracker);
+// Returns nullopt if successful and InvalidConfigReason if not.
+optional<InvalidConfigReason> determineConditionUpdateStatus(
+ const StatsdConfig& config, const int conditionIdx,
+ const std::unordered_map<int64_t, int>& oldConditionTrackerMap,
+ const std::vector<sp<ConditionTracker>>& oldConditionTrackers,
+ const std::unordered_map<int64_t, int>& newConditionTrackerMap,
+ const std::set<int64_t>& replacedMatchers, std::vector<UpdateStatus>& conditionsToUpdate,
+ std::vector<bool>& cycleTracker);
// Updates ConditionTrackers
// input:
@@ -105,23 +107,23 @@
// to indices of condition trackers that use the matcher
// [conditionCache]: stores the current conditions for each ConditionTracker
// [replacedConditions]: set of condition ids that have changed and have been replaced
-bool updateConditions(const ConfigKey& key, const StatsdConfig& config,
- const std::unordered_map<int64_t, int>& atomMatchingTrackerMap,
- const std::set<int64_t>& replacedMatchers,
- const std::unordered_map<int64_t, int>& oldConditionTrackerMap,
- const std::vector<sp<ConditionTracker>>& oldConditionTrackers,
- std::unordered_map<int64_t, int>& newConditionTrackerMap,
- std::vector<sp<ConditionTracker>>& newConditionTrackers,
- std::unordered_map<int, std::vector<int>>& trackerToConditionMap,
- std::vector<ConditionState>& conditionCache,
- std::set<int64_t>& replacedConditions);
+// Returns nullopt if successful and InvalidConfigReason if not.
+optional<InvalidConfigReason> updateConditions(
+ const ConfigKey& key, const StatsdConfig& config,
+ const std::unordered_map<int64_t, int>& atomMatchingTrackerMap,
+ const std::set<int64_t>& replacedMatchers,
+ const std::unordered_map<int64_t, int>& oldConditionTrackerMap,
+ const std::vector<sp<ConditionTracker>>& oldConditionTrackers,
+ std::unordered_map<int64_t, int>& newConditionTrackerMap,
+ std::vector<sp<ConditionTracker>>& newConditionTrackers,
+ std::unordered_map<int, std::vector<int>>& trackerToConditionMap,
+ std::vector<ConditionState>& conditionCache, std::set<int64_t>& replacedConditions);
-bool updateStates(const StatsdConfig& config,
- const std::map<int64_t, uint64_t>& oldStateProtoHashes,
- std::unordered_map<int64_t, int>& stateAtomIdMap,
- std::unordered_map<int64_t, std::unordered_map<int, int64_t>>& allStateGroupMaps,
- std::map<int64_t, uint64_t>& newStateProtoHashes,
- std::set<int64_t>& replacedStates);
+optional<InvalidConfigReason> updateStates(
+ const StatsdConfig& config, const std::map<int64_t, uint64_t>& oldStateProtoHashes,
+ std::unordered_map<int64_t, int>& stateAtomIdMap,
+ std::unordered_map<int64_t, std::unordered_map<int, int64_t>>& allStateGroupMaps,
+ std::map<int64_t, uint64_t>& newStateProtoHashes, std::set<int64_t>& replacedStates);
// Function to determine the update status (preserve/replace/new) of all metrics in the config.
// [config]: the input StatsdConfig
@@ -133,15 +135,13 @@
// [replacedStates]: set of replaced state ids. metrics using these states must be replaced
// output:
// [metricsToUpdate]: update status of each metric. Will be changed from UPDATE_UNKNOWN
-// Returns whether the function was successful or not.
-bool determineAllMetricUpdateStatuses(const StatsdConfig& config,
- const unordered_map<int64_t, int>& oldMetricProducerMap,
- const vector<sp<MetricProducer>>& oldMetricProducers,
- const unordered_map<int64_t, int>& metricToActivationMap,
- const set<int64_t>& replacedMatchers,
- const set<int64_t>& replacedConditions,
- const set<int64_t>& replacedStates,
- vector<UpdateStatus>& metricsToUpdate);
+// Returns nullopt if successful and InvalidConfigReason if not.
+optional<InvalidConfigReason> determineAllMetricUpdateStatuses(
+ const StatsdConfig& config, const unordered_map<int64_t, int>& oldMetricProducerMap,
+ const vector<sp<MetricProducer>>& oldMetricProducers,
+ const unordered_map<int64_t, int>& metricToActivationMap,
+ const set<int64_t>& replacedMatchers, const set<int64_t>& replacedConditions,
+ const set<int64_t>& replacedStates, vector<UpdateStatus>& metricsToUpdate);
// Update MetricProducers.
// input:
@@ -162,7 +162,8 @@
// [conditionToMetricMap]: contains the mapping from condition tracker index to
// the list of MetricProducer index
// [trackerToMetricMap]: contains the mapping from log tracker to MetricProducer index.
-bool updateMetrics(
+// Returns nullopt if successful and InvalidConfigReason if not.
+optional<InvalidConfigReason> updateMetrics(
const ConfigKey& key, const StatsdConfig& config, const int64_t timeBaseNs,
const int64_t currentTimeNs, const sp<StatsPullerManager>& pullerManager,
const std::unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
@@ -194,12 +195,11 @@
// [replacedMetrics]: set of replaced metric ids. alerts using these metrics must be replaced
// output:
// [updateStatus]: update status of the alert. Will be changed from UPDATE_UNKNOWN
-// Returns whether the function was successful or not.
-bool determineAlertUpdateStatus(const Alert& alert,
- const std::unordered_map<int64_t, int>& oldAlertTrackerMap,
- const std::vector<sp<AnomalyTracker>>& oldAnomalyTrackers,
- const std::set<int64_t>& replacedMetrics,
- UpdateStatus& updateStatus);
+// Returns nullopt if successful and InvalidConfigReason if not.
+optional<InvalidConfigReason> determineAlertUpdateStatus(
+ const Alert& alert, const std::unordered_map<int64_t, int>& oldAlertTrackerMap,
+ const std::vector<sp<AnomalyTracker>>& oldAnomalyTrackers,
+ const std::set<int64_t>& replacedMetrics, UpdateStatus& updateStatus);
// Update AnomalyTrackers.
// input:
@@ -217,50 +217,51 @@
// output:
// [newAlertTrackerMap]: mapping of alert id to index in the new config
// [newAnomalyTrackers]: contains the list of sp to the AnomalyTrackers created.
-bool updateAlerts(const StatsdConfig& config, const int64_t currentTimeNs,
- const std::unordered_map<int64_t, int>& metricProducerMap,
- const std::set<int64_t>& replacedMetrics,
- const std::unordered_map<int64_t, int>& oldAlertTrackerMap,
- const std::vector<sp<AnomalyTracker>>& oldAnomalyTrackers,
- const sp<AlarmMonitor>& anomalyAlarmMonitor,
- std::vector<sp<MetricProducer>>& allMetricProducers,
- std::unordered_map<int64_t, int>& newAlertTrackerMap,
- std::vector<sp<AnomalyTracker>>& newAnomalyTrackers);
+// Returns nullopt if successful and InvalidConfigReason if not.
+optional<InvalidConfigReason> updateAlerts(
+ const StatsdConfig& config, const int64_t currentTimeNs,
+ const std::unordered_map<int64_t, int>& metricProducerMap,
+ const std::set<int64_t>& replacedMetrics,
+ const std::unordered_map<int64_t, int>& oldAlertTrackerMap,
+ const std::vector<sp<AnomalyTracker>>& oldAnomalyTrackers,
+ const sp<AlarmMonitor>& anomalyAlarmMonitor,
+ std::vector<sp<MetricProducer>>& allMetricProducers,
+ std::unordered_map<int64_t, int>& newAlertTrackerMap,
+ std::vector<sp<AnomalyTracker>>& newAnomalyTrackers);
// Updates the existing MetricsManager from a new StatsdConfig.
// Parameters are the members of MetricsManager. See MetricsManager for declaration.
-bool updateStatsdConfig(const ConfigKey& key, const StatsdConfig& config, const sp<UidMap>& uidMap,
- const sp<StatsPullerManager>& pullerManager,
- const sp<AlarmMonitor>& anomalyAlarmMonitor,
- const sp<AlarmMonitor>& periodicAlarmMonitor, const int64_t timeBaseNs,
- const int64_t currentTimeNs,
- const std::vector<sp<AtomMatchingTracker>>& oldAtomMatchingTrackers,
- const std::unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
- const std::vector<sp<ConditionTracker>>& oldConditionTrackers,
- const std::unordered_map<int64_t, int>& oldConditionTrackerMap,
- const std::vector<sp<MetricProducer>>& oldMetricProducers,
- const std::unordered_map<int64_t, int>& oldMetricProducerMap,
- const std::vector<sp<AnomalyTracker>>& oldAnomalyTrackers,
- const std::unordered_map<int64_t, int>& oldAlertTrackerMap,
- const std::map<int64_t, uint64_t>& oldStateProtoHashes,
- std::set<int>& allTagIds,
- std::vector<sp<AtomMatchingTracker>>& newAtomMatchingTrackers,
- std::unordered_map<int64_t, int>& newAtomMatchingTrackerMap,
- std::vector<sp<ConditionTracker>>& newConditionTrackers,
- std::unordered_map<int64_t, int>& newConditionTrackerMap,
- std::vector<sp<MetricProducer>>& newMetricProducers,
- std::unordered_map<int64_t, int>& newMetricProducerMap,
- std::vector<sp<AnomalyTracker>>& newAlertTrackers,
- std::unordered_map<int64_t, int>& newAlertTrackerMap,
- std::vector<sp<AlarmTracker>>& newPeriodicAlarmTrackers,
- std::unordered_map<int, std::vector<int>>& conditionToMetricMap,
- std::unordered_map<int, std::vector<int>>& trackerToMetricMap,
- std::unordered_map<int, std::vector<int>>& trackerToConditionMap,
- std::unordered_map<int, std::vector<int>>& activationTrackerToMetricMap,
- std::unordered_map<int, std::vector<int>>& deactivationTrackerToMetricMap,
- std::vector<int>& metricsWithActivation,
- std::map<int64_t, uint64_t>& newStateProtoHashes,
- std::set<int64_t>& noReportMetricIds);
+optional<InvalidConfigReason> updateStatsdConfig(
+ const ConfigKey& key, const StatsdConfig& config, const sp<UidMap>& uidMap,
+ const sp<StatsPullerManager>& pullerManager, const sp<AlarmMonitor>& anomalyAlarmMonitor,
+ const sp<AlarmMonitor>& periodicAlarmMonitor, const int64_t timeBaseNs,
+ const int64_t currentTimeNs,
+ const std::vector<sp<AtomMatchingTracker>>& oldAtomMatchingTrackers,
+ const std::unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
+ const std::vector<sp<ConditionTracker>>& oldConditionTrackers,
+ const std::unordered_map<int64_t, int>& oldConditionTrackerMap,
+ const std::vector<sp<MetricProducer>>& oldMetricProducers,
+ const std::unordered_map<int64_t, int>& oldMetricProducerMap,
+ const std::vector<sp<AnomalyTracker>>& oldAnomalyTrackers,
+ const std::unordered_map<int64_t, int>& oldAlertTrackerMap,
+ const std::map<int64_t, uint64_t>& oldStateProtoHashes,
+ std::unordered_map<int, std::vector<int>>& allTagIdsToMatchersMap,
+ std::vector<sp<AtomMatchingTracker>>& newAtomMatchingTrackers,
+ std::unordered_map<int64_t, int>& newAtomMatchingTrackerMap,
+ std::vector<sp<ConditionTracker>>& newConditionTrackers,
+ std::unordered_map<int64_t, int>& newConditionTrackerMap,
+ std::vector<sp<MetricProducer>>& newMetricProducers,
+ std::unordered_map<int64_t, int>& newMetricProducerMap,
+ std::vector<sp<AnomalyTracker>>& newAlertTrackers,
+ std::unordered_map<int64_t, int>& newAlertTrackerMap,
+ std::vector<sp<AlarmTracker>>& newPeriodicAlarmTrackers,
+ std::unordered_map<int, std::vector<int>>& conditionToMetricMap,
+ std::unordered_map<int, std::vector<int>>& trackerToMetricMap,
+ std::unordered_map<int, std::vector<int>>& trackerToConditionMap,
+ std::unordered_map<int, std::vector<int>>& activationTrackerToMetricMap,
+ std::unordered_map<int, std::vector<int>>& deactivationTrackerToMetricMap,
+ std::vector<int>& metricsWithActivation, std::map<int64_t, uint64_t>& newStateProtoHashes,
+ std::set<int64_t>& noReportMetricIds);
} // namespace statsd
} // namespace os
diff --git a/statsd/src/metrics/parsing_utils/metrics_manager_util.cpp b/statsd/src/metrics/parsing_utils/metrics_manager_util.cpp
index 9354e10..7d2ed03 100644
--- a/statsd/src/metrics/parsing_utils/metrics_manager_util.cpp
+++ b/statsd/src/metrics/parsing_utils/metrics_manager_util.cpp
@@ -37,6 +37,7 @@
#include "metrics/KllMetricProducer.h"
#include "metrics/MetricProducer.h"
#include "metrics/NumericValueMetricProducer.h"
+#include "metrics/RestrictedEventMetricProducer.h"
#include "state/StateManager.h"
#include "stats_util.h"
@@ -65,32 +66,42 @@
} // namespace
-sp<AtomMatchingTracker> createAtomMatchingTracker(const AtomMatcher& logMatcher, const int index,
- const sp<UidMap>& uidMap) {
+sp<AtomMatchingTracker> createAtomMatchingTracker(
+ const AtomMatcher& logMatcher, const int index, const sp<UidMap>& uidMap,
+ optional<InvalidConfigReason>& invalidConfigReason) {
string serializedMatcher;
if (!logMatcher.SerializeToString(&serializedMatcher)) {
ALOGE("Unable to serialize matcher %lld", (long long)logMatcher.id());
+ invalidConfigReason = createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_MATCHER_SERIALIZATION_FAILED, logMatcher.id());
return nullptr;
}
uint64_t protoHash = Hash64(serializedMatcher);
switch (logMatcher.contents_case()) {
- case AtomMatcher::ContentsCase::kSimpleAtomMatcher:
- return new SimpleAtomMatchingTracker(logMatcher.id(), index, protoHash,
- logMatcher.simple_atom_matcher(), uidMap);
+ case AtomMatcher::ContentsCase::kSimpleAtomMatcher: {
+ sp<AtomMatchingTracker> simpleAtomMatcher = new SimpleAtomMatchingTracker(
+ logMatcher.id(), index, protoHash, logMatcher.simple_atom_matcher(), uidMap);
+ return simpleAtomMatcher;
+ }
case AtomMatcher::ContentsCase::kCombination:
return new CombinationAtomMatchingTracker(logMatcher.id(), index, protoHash);
default:
ALOGE("Matcher \"%lld\" malformed", (long long)logMatcher.id());
+ invalidConfigReason = createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_MATCHER_MALFORMED_CONTENTS_CASE, logMatcher.id());
return nullptr;
}
}
sp<ConditionTracker> createConditionTracker(
const ConfigKey& key, const Predicate& predicate, const int index,
- const unordered_map<int64_t, int>& atomMatchingTrackerMap) {
+ const unordered_map<int64_t, int>& atomMatchingTrackerMap,
+ optional<InvalidConfigReason>& invalidConfigReason) {
string serializedPredicate;
if (!predicate.SerializeToString(&serializedPredicate)) {
ALOGE("Unable to serialize predicate %lld", (long long)predicate.id());
+ invalidConfigReason = createInvalidConfigReasonWithPredicate(
+ INVALID_CONFIG_REASON_CONDITION_SERIALIZATION_FAILED, predicate.id());
return nullptr;
}
uint64_t protoHash = Hash64(serializedPredicate);
@@ -104,17 +115,19 @@
}
default:
ALOGE("Predicate \"%lld\" malformed", (long long)predicate.id());
+ invalidConfigReason = createInvalidConfigReasonWithPredicate(
+ INVALID_CONFIG_REASON_CONDITION_MALFORMED_CONTENTS_CASE, predicate.id());
return nullptr;
}
}
-bool getMetricProtoHash(const StatsdConfig& config, const MessageLite& metric, const int64_t id,
- const unordered_map<int64_t, int>& metricToActivationMap,
- uint64_t& metricHash) {
+optional<InvalidConfigReason> getMetricProtoHash(
+ const StatsdConfig& config, const MessageLite& metric, const int64_t id,
+ const unordered_map<int64_t, int>& metricToActivationMap, uint64_t& metricHash) {
string serializedMetric;
if (!metric.SerializeToString(&serializedMetric)) {
ALOGE("Unable to serialize metric %lld", (long long)id);
- return false;
+ return InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_SERIALIZATION_FAILED, id);
}
metricHash = Hash64(serializedMetric);
@@ -125,54 +138,58 @@
const MetricActivation& activation = config.metric_activation(metricActivationIt->second);
if (!activation.SerializeToString(&serializedActivation)) {
ALOGE("Unable to serialize metric activation for metric %lld", (long long)id);
- return false;
+ return InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_ACTIVATION_SERIALIZATION_FAILED,
+ id);
}
metricHash = Hash64(to_string(metricHash).append(to_string(Hash64(serializedActivation))));
}
- return true;
+ return nullopt;
}
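
getMetricProtoHash combines the metric hash with the activation hash by hashing the concatenation of their decimal string forms. A small stand-alone illustration, with std::hash standing in for statsd's Hash64, is shown below:

#include <cstdint>
#include <cstdio>
#include <functional>
#include <string>

// Stand-in for Hash64(); only the combining pattern matters here.
uint64_t hash64(const std::string& s) {
    return std::hash<std::string>{}(s);
}

int main() {
    // Placeholder inputs standing in for the serialized metric and activation protos.
    uint64_t metricHash = hash64("serialized metric bytes");
    const uint64_t activationHash = hash64("serialized MetricActivation bytes");
    // Combine: hash the concatenation of both hashes' decimal string forms.
    metricHash = hash64(std::to_string(metricHash).append(std::to_string(activationHash)));
    std::printf("combined metric proto hash: %llu\n",
                static_cast<unsigned long long>(metricHash));
    return 0;
}
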
-bool handleMetricWithAtomMatchingTrackers(
- const int64_t matcherId, const int metricIndex, const bool enforceOneAtom,
- const vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
+optional<InvalidConfigReason> handleMetricWithAtomMatchingTrackers(
+ const int64_t matcherId, const int64_t metricId, const int metricIndex,
+ const bool enforceOneAtom, const vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
const unordered_map<int64_t, int>& atomMatchingTrackerMap,
unordered_map<int, vector<int>>& trackerToMetricMap, int& logTrackerIndex) {
auto logTrackerIt = atomMatchingTrackerMap.find(matcherId);
if (logTrackerIt == atomMatchingTrackerMap.end()) {
ALOGW("cannot find the AtomMatcher \"%lld\" in config", (long long)matcherId);
- return false;
+ return createInvalidConfigReasonWithMatcher(INVALID_CONFIG_REASON_METRIC_MATCHER_NOT_FOUND,
+ metricId, matcherId);
}
if (enforceOneAtom && allAtomMatchingTrackers[logTrackerIt->second]->getAtomIds().size() > 1) {
ALOGE("AtomMatcher \"%lld\" has more than one tag ids. When a metric has dimension, "
"the \"what\" can only be about one atom type. trigger_event matchers can also only "
"be about one atom type.",
(long long)matcherId);
- return false;
+ return createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_METRIC_MATCHER_MORE_THAN_ONE_ATOM, metricId, matcherId);
}
logTrackerIndex = logTrackerIt->second;
auto& metric_list = trackerToMetricMap[logTrackerIndex];
metric_list.push_back(metricIndex);
- return true;
+ return nullopt;
}
-bool handleMetricWithConditions(
- const int64_t condition, const int metricIndex,
+optional<InvalidConfigReason> handleMetricWithConditions(
+ const int64_t condition, const int64_t metricId, const int metricIndex,
const unordered_map<int64_t, int>& conditionTrackerMap,
- const ::google::protobuf::RepeatedPtrField<::android::os::statsd::MetricConditionLink>&
- links,
+ const ::google::protobuf::RepeatedPtrField<MetricConditionLink>& links,
const vector<sp<ConditionTracker>>& allConditionTrackers, int& conditionIndex,
unordered_map<int, vector<int>>& conditionToMetricMap) {
auto condition_it = conditionTrackerMap.find(condition);
if (condition_it == conditionTrackerMap.end()) {
ALOGW("cannot find Predicate \"%lld\" in the config", (long long)condition);
- return false;
+ return createInvalidConfigReasonWithPredicate(
+ INVALID_CONFIG_REASON_METRIC_CONDITION_NOT_FOUND, metricId, condition);
}
-
for (const auto& link : links) {
auto it = conditionTrackerMap.find(link.condition());
if (it == conditionTrackerMap.end()) {
ALOGW("cannot find Predicate \"%lld\" in the config", (long long)link.condition());
- return false;
+ return createInvalidConfigReasonWithPredicate(
+ INVALID_CONFIG_REASON_METRIC_CONDITION_LINK_NOT_FOUND, metricId,
+ link.condition());
}
}
conditionIndex = condition_it->second;
@@ -180,7 +197,7 @@
// will create new vector if not exist before.
auto& metricList = conditionToMetricMap[condition_it->second];
metricList.push_back(metricIndex);
- return true;
+ return nullopt;
}
// Initializes state data structures for a metric.
@@ -195,8 +212,9 @@
// [stateGroupMap]: this map should contain the mapping from states ids and state
// values to state group ids for all states that this metric
// is interested in
-bool handleMetricWithStates(
- const StatsdConfig& config, const ::google::protobuf::RepeatedField<int64_t>& stateIds,
+optional<InvalidConfigReason> handleMetricWithStates(
+ const StatsdConfig& config, const int64_t metricId,
+ const ::google::protobuf::RepeatedField<int64_t>& stateIds,
const unordered_map<int64_t, int>& stateAtomIdMap,
const unordered_map<int64_t, unordered_map<int, int64_t>>& allStateGroupMaps,
vector<int>& slicedStateAtoms,
@@ -205,7 +223,8 @@
auto it = stateAtomIdMap.find(stateId);
if (it == stateAtomIdMap.end()) {
ALOGW("cannot find State %" PRId64 " in the config", stateId);
- return false;
+ return createInvalidConfigReasonWithState(INVALID_CONFIG_REASON_METRIC_STATE_NOT_FOUND,
+ metricId, stateId);
}
int atomId = it->second;
slicedStateAtoms.push_back(atomId);
@@ -215,22 +234,64 @@
stateGroupMap[atomId] = stateIt->second;
}
}
- return true;
+ return nullopt;
}
-bool handleMetricWithStateLink(const FieldMatcher& stateMatcher,
- const vector<Matcher>& dimensionsInWhat) {
+optional<InvalidConfigReason> handleMetricWithStateLink(const int64_t metricId,
+ const FieldMatcher& stateMatcher,
+ const vector<Matcher>& dimensionsInWhat) {
vector<Matcher> stateMatchers;
translateFieldMatcher(stateMatcher, &stateMatchers);
+ if (!subsetDimensions(stateMatchers, dimensionsInWhat)) {
+ return InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_STATELINKS_NOT_SUBSET_DIM_IN_WHAT,
+ metricId);
+ }
+ return nullopt;
+}
- return subsetDimensions(stateMatchers, dimensionsInWhat);
+optional<InvalidConfigReason> handleMetricWithSampling(
+ const int64_t metricId, const DimensionalSamplingInfo& dimSamplingInfo,
+ const vector<Matcher>& dimensionsInWhat, SamplingInfo& samplingInfo) {
+ if (!dimSamplingInfo.has_sampled_what_field()) {
+ ALOGE("metric DimensionalSamplingInfo missing sampledWhatField");
+ return InvalidConfigReason(
+ INVALID_CONFIG_REASON_METRIC_DIMENSIONAL_SAMPLING_INFO_MISSING_SAMPLED_FIELD,
+ metricId);
+ }
+
+ if (dimSamplingInfo.shard_count() <= 1) {
+ ALOGE("metric shardCount must be > 1");
+ return InvalidConfigReason(
+ INVALID_CONFIG_REASON_METRIC_DIMENSIONAL_SAMPLING_INFO_INCORRECT_SHARD_COUNT,
+ metricId);
+ }
+ samplingInfo.shardCount = dimSamplingInfo.shard_count();
+
+ if (HasPositionALL(dimSamplingInfo.sampled_what_field()) ||
+ HasPositionANY(dimSamplingInfo.sampled_what_field())) {
+ ALOGE("metric has repeated field with position ALL or ANY as the sampled dimension");
+ return InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_SAMPLED_FIELD_INCORRECT_SIZE,
+ metricId);
+ }
+
+ translateFieldMatcher(dimSamplingInfo.sampled_what_field(), &samplingInfo.sampledWhatFields);
+ if (samplingInfo.sampledWhatFields.size() != 1) {
+ ALOGE("metric has incorrect number of sampled dimension fields");
+ return InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_SAMPLED_FIELD_INCORRECT_SIZE,
+ metricId);
+ }
+ if (!subsetDimensions(samplingInfo.sampledWhatFields, dimensionsInWhat)) {
+ return InvalidConfigReason(
+ INVALID_CONFIG_REASON_METRIC_SAMPLED_FIELDS_NOT_SUBSET_DIM_IN_WHAT, metricId);
+ }
+ return nullopt;
}
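
handleMetricWithSampling above only validates and stores shard_count and the sampled dimension fields; how the shard count is consumed is outside this hunk. As a rough illustration of why shard_count must be greater than 1, the sketch below keeps only the dimension keys whose hash lands in one of shardCount buckets, retaining roughly a 1/shard_count subset (the hash function and shard choice here are illustrative assumptions, not statsd's actual scheme):

#include <cstdint>
#include <cstdio>
#include <functional>
#include <string>

// Keep a dimension key iff its hash falls into the chosen shard.
bool keepDimension(const std::string& dimensionKey, int shardCount, int chosenShard) {
    const uint64_t h = std::hash<std::string>{}(dimensionKey);
    return static_cast<int>(h % static_cast<uint64_t>(shardCount)) == chosenShard;
}

int main() {
    int kept = 0;
    for (int uid = 0; uid < 1000; ++uid) {
        if (keepDimension("uid=" + std::to_string(uid), /*shardCount=*/4, /*chosenShard=*/0)) {
            ++kept;
        }
    }
    std::printf("kept %d of 1000 dimension keys (~1/4 expected)\n", kept);
    return 0;
}
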
// Validates a metricActivation and populates state.
// EventActivationMap and EventDeactivationMap are supplied to a MetricProducer
// to provide the producer with state about its activators and deactivators.
-// Returns false if there are errors.
+// Returns an InvalidConfigReason if there are errors, nullopt otherwise.
-bool handleMetricActivation(
+optional<InvalidConfigReason> handleMetricActivation(
const StatsdConfig& config, const int64_t metricId, const int metricIndex,
const unordered_map<int64_t, int>& metricToActivationMap,
const unordered_map<int64_t, int>& atomMatchingTrackerMap,
@@ -242,7 +303,7 @@
// Check if metric has an associated activation
auto itr = metricToActivationMap.find(metricId);
if (itr == metricToActivationMap.end()) {
- return true;
+ return nullopt;
}
int activationIndex = itr->second;
@@ -254,7 +315,9 @@
auto itr = atomMatchingTrackerMap.find(activation.atom_matcher_id());
if (itr == atomMatchingTrackerMap.end()) {
ALOGE("Atom matcher not found for event activation.");
- return false;
+ return createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_METRIC_ACTIVATION_MATCHER_NOT_FOUND, metricId,
+ activation.atom_matcher_id());
}
ActivationType activationType = (activation.has_activation_type())
@@ -271,7 +334,9 @@
itr = atomMatchingTrackerMap.find(activation.deactivation_atom_matcher_id());
if (itr == atomMatchingTrackerMap.end()) {
ALOGE("Atom matcher not found for event deactivation.");
- return false;
+ return createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_METRIC_DEACTIVATION_MATCHER_NOT_FOUND, metricId,
+ activation.deactivation_atom_matcher_id());
}
int deactivationAtomMatcherIndex = itr->second;
deactivationAtomTrackerToMetricMap[deactivationAtomMatcherIndex].push_back(metricIndex);
@@ -280,13 +345,13 @@
}
metricsWithActivation.push_back(metricIndex);
- return true;
+ return nullopt;
}
// Validates a metricActivation and populates state.
// Fills the new event activation/deactivation maps, preserving the existing activations
-// Returns false if there are errors.
+// Returns an InvalidConfigReason if there are errors, nullopt otherwise.
-bool handleMetricActivationOnConfigUpdate(
+optional<InvalidConfigReason> handleMetricActivationOnConfigUpdate(
const StatsdConfig& config, const int64_t metricId, const int metricIndex,
const unordered_map<int64_t, int>& metricToActivationMap,
const unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
@@ -300,7 +365,7 @@
// Check if metric has an associated activation.
const auto& itr = metricToActivationMap.find(metricId);
if (itr == metricToActivationMap.end()) {
- return true;
+ return nullopt;
}
int activationIndex = itr->second;
@@ -312,7 +377,9 @@
const auto& newActivationIt = newAtomMatchingTrackerMap.find(activationMatcherId);
if (newActivationIt == newAtomMatchingTrackerMap.end()) {
ALOGE("Atom matcher not found in new config for event activation.");
- return false;
+ return createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_METRIC_ACTIVATION_MATCHER_NOT_FOUND_NEW, metricId,
+ activationMatcherId);
}
int newActivationMatcherIndex = newActivationIt->second;
@@ -320,13 +387,17 @@
const auto& oldActivationIt = oldAtomMatchingTrackerMap.find(activationMatcherId);
if (oldActivationIt == oldAtomMatchingTrackerMap.end()) {
ALOGE("Atom matcher not found in existing config for event activation.");
- return false;
+ return createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_METRIC_ACTIVATION_MATCHER_NOT_FOUND_EXISTING, metricId,
+ activationMatcherId);
}
int oldActivationMatcherIndex = oldActivationIt->second;
const auto& oldEventActivationIt = oldEventActivationMap.find(oldActivationMatcherIndex);
if (oldEventActivationIt == oldEventActivationMap.end()) {
ALOGE("Could not find existing event activation to update");
- return false;
+ return createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_METRIC_ACTIVATION_NOT_FOUND_EXISTING, metricId,
+ activationMatcherId);
}
newEventActivationMap.emplace(newActivationMatcherIndex, oldEventActivationIt->second);
activationAtomTrackerToMetricMap[newActivationMatcherIndex].push_back(metricIndex);
@@ -337,7 +408,9 @@
const auto& newDeactivationIt = newAtomMatchingTrackerMap.find(deactivationMatcherId);
if (newDeactivationIt == newAtomMatchingTrackerMap.end()) {
ALOGE("Deactivation atom matcher not found in new config for event activation.");
- return false;
+ return createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_METRIC_DEACTIVATION_MATCHER_NOT_FOUND_NEW, metricId,
+ deactivationMatcherId);
}
int newDeactivationMatcherIndex = newDeactivationIt->second;
newEventDeactivationMap[newDeactivationMatcherIndex].push_back(
@@ -347,7 +420,7 @@
}
metricsWithActivation.push_back(metricIndex);
- return true;
+ return nullopt;
}
optional<sp<MetricProducer>> createCountMetricProducerAndUpdateMetadata(
@@ -365,29 +438,34 @@
unordered_map<int, vector<int>>& conditionToMetricMap,
unordered_map<int, vector<int>>& activationAtomTrackerToMetricMap,
unordered_map<int, vector<int>>& deactivationAtomTrackerToMetricMap,
- vector<int>& metricsWithActivation) {
+ vector<int>& metricsWithActivation, optional<InvalidConfigReason>& invalidConfigReason) {
if (!metric.has_id() || !metric.has_what()) {
ALOGE("cannot find metric id or \"what\" in CountMetric \"%lld\"", (long long)metric.id());
+ invalidConfigReason =
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_MISSING_ID_OR_WHAT, metric.id());
return nullopt;
}
int trackerIndex;
- if (!handleMetricWithAtomMatchingTrackers(metric.what(), metricIndex,
- metric.has_dimensions_in_what(),
- allAtomMatchingTrackers, atomMatchingTrackerMap,
- trackerToMetricMap, trackerIndex)) {
+ invalidConfigReason = handleMetricWithAtomMatchingTrackers(
+ metric.what(), metric.id(), metricIndex, metric.has_dimensions_in_what(),
+ allAtomMatchingTrackers, atomMatchingTrackerMap, trackerToMetricMap, trackerIndex);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
int conditionIndex = -1;
if (metric.has_condition()) {
- if (!handleMetricWithConditions(metric.condition(), metricIndex, conditionTrackerMap,
- metric.links(), allConditionTrackers, conditionIndex,
- conditionToMetricMap)) {
+ invalidConfigReason = handleMetricWithConditions(
+ metric.condition(), metric.id(), metricIndex, conditionTrackerMap, metric.links(),
+ allConditionTrackers, conditionIndex, conditionToMetricMap);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
} else {
if (metric.links_size() > 0) {
ALOGW("metrics has a MetricConditionLink but doesn't have a condition");
+ invalidConfigReason = InvalidConfigReason(
+ INVALID_CONFIG_REASON_METRIC_CONDITIONLINK_NO_CONDITION, metric.id());
return nullopt;
}
}
@@ -395,28 +473,47 @@
std::vector<int> slicedStateAtoms;
unordered_map<int, unordered_map<int, int64_t>> stateGroupMap;
if (metric.slice_by_state_size() > 0) {
- if (!handleMetricWithStates(config, metric.slice_by_state(), stateAtomIdMap,
- allStateGroupMaps, slicedStateAtoms, stateGroupMap)) {
+ invalidConfigReason =
+ handleMetricWithStates(config, metric.id(), metric.slice_by_state(), stateAtomIdMap,
+ allStateGroupMaps, slicedStateAtoms, stateGroupMap);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
} else {
if (metric.state_link_size() > 0) {
ALOGW("CountMetric has a MetricStateLink but doesn't have a slice_by_state");
+ invalidConfigReason = InvalidConfigReason(
+ INVALID_CONFIG_REASON_METRIC_STATELINK_NO_STATE, metric.id());
+ return nullopt;
+ }
+ }
+
+ // Check that all metric state links are a subset of dimensions_in_what fields.
+ std::vector<Matcher> dimensionsInWhat;
+ translateFieldMatcher(metric.dimensions_in_what(), &dimensionsInWhat);
+ for (const auto& stateLink : metric.state_link()) {
+ invalidConfigReason = handleMetricWithStateLink(metric.id(), stateLink.fields_in_what(),
+ dimensionsInWhat);
+ if (invalidConfigReason.has_value()) {
+ ALOGW("CountMetric's MetricStateLinks must be a subset of dimensions in what");
return nullopt;
}
}
unordered_map<int, shared_ptr<Activation>> eventActivationMap;
unordered_map<int, vector<shared_ptr<Activation>>> eventDeactivationMap;
- if (!handleMetricActivation(config, metric.id(), metricIndex, metricToActivationMap,
- atomMatchingTrackerMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, metricsWithActivation,
- eventActivationMap, eventDeactivationMap)) {
+ invalidConfigReason = handleMetricActivation(
+ config, metric.id(), metricIndex, metricToActivationMap, atomMatchingTrackerMap,
+ activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
+ metricsWithActivation, eventActivationMap, eventDeactivationMap);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
uint64_t metricHash;
- if (!getMetricProtoHash(config, metric, metric.id(), metricToActivationMap, metricHash)) {
+ invalidConfigReason =
+ getMetricProtoHash(config, metric, metric.id(), metricToActivationMap, metricHash);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
@@ -424,12 +521,27 @@
(metric.threshold().value_comparison_case() == UploadThreshold::kLtFloat ||
metric.threshold().value_comparison_case() == UploadThreshold::kGtFloat)) {
ALOGW("Count metric incorrect upload threshold type or no type used");
+ invalidConfigReason =
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_BAD_THRESHOLD, metric.id());
return nullopt;
}
- return {new CountMetricProducer(key, metric, conditionIndex, initialConditionCache, wizard,
+ sp<MetricProducer> metricProducer =
+ new CountMetricProducer(key, metric, conditionIndex, initialConditionCache, wizard,
metricHash, timeBaseNs, currentTimeNs, eventActivationMap,
- eventDeactivationMap, slicedStateAtoms, stateGroupMap)};
+ eventDeactivationMap, slicedStateAtoms, stateGroupMap);
+
+ SamplingInfo samplingInfo;
+ if (metric.has_dimensional_sampling_info()) {
+ invalidConfigReason = handleMetricWithSampling(
+ metric.id(), metric.dimensional_sampling_info(), dimensionsInWhat, samplingInfo);
+ if (invalidConfigReason.has_value()) {
+ return nullopt;
+ }
+ metricProducer->setSamplingInfo(samplingInfo);
+ }
+
+ return metricProducer;
}
optional<sp<MetricProducer>> createDurationMetricProducerAndUpdateMetadata(
@@ -447,15 +559,19 @@
unordered_map<int, vector<int>>& conditionToMetricMap,
unordered_map<int, vector<int>>& activationAtomTrackerToMetricMap,
unordered_map<int, vector<int>>& deactivationAtomTrackerToMetricMap,
- vector<int>& metricsWithActivation) {
+ vector<int>& metricsWithActivation, optional<InvalidConfigReason>& invalidConfigReason) {
if (!metric.has_id() || !metric.has_what()) {
ALOGE("cannot find metric id or \"what\" in DurationMetric \"%lld\"",
(long long)metric.id());
+ invalidConfigReason =
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_MISSING_ID_OR_WHAT, metric.id());
return nullopt;
}
const auto& what_it = conditionTrackerMap.find(metric.what());
if (what_it == conditionTrackerMap.end()) {
ALOGE("DurationMetric's \"what\" is not present in the condition trackers");
+ invalidConfigReason = createInvalidConfigReasonWithPredicate(
+ INVALID_CONFIG_REASON_DURATION_METRIC_WHAT_NOT_FOUND, metric.id(), metric.what());
return nullopt;
}
@@ -463,6 +579,8 @@
const Predicate& durationWhat = config.predicate(whatIndex);
if (durationWhat.contents_case() != Predicate::ContentsCase::kSimplePredicate) {
ALOGE("DurationMetric's \"what\" must be a simple condition");
+ invalidConfigReason = createInvalidConfigReasonWithPredicate(
+ INVALID_CONFIG_REASON_DURATION_METRIC_WHAT_NOT_SIMPLE, metric.id(), metric.what());
return nullopt;
}
@@ -470,40 +588,52 @@
bool nesting = simplePredicate.count_nesting();
int startIndex = -1, stopIndex = -1, stopAllIndex = -1;
- if (!simplePredicate.has_start() ||
- !handleMetricWithAtomMatchingTrackers(
- simplePredicate.start(), metricIndex, metric.has_dimensions_in_what(),
- allAtomMatchingTrackers, atomMatchingTrackerMap, trackerToMetricMap, startIndex)) {
+ if (!simplePredicate.has_start()) {
ALOGE("Duration metrics must specify a valid start event matcher");
+ invalidConfigReason = createInvalidConfigReasonWithPredicate(
+ INVALID_CONFIG_REASON_DURATION_METRIC_MISSING_START, metric.id(), metric.what());
+ return nullopt;
+ }
+ invalidConfigReason = handleMetricWithAtomMatchingTrackers(
+ simplePredicate.start(), metric.id(), metricIndex, metric.has_dimensions_in_what(),
+ allAtomMatchingTrackers, atomMatchingTrackerMap, trackerToMetricMap, startIndex);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
- if (simplePredicate.has_stop() &&
- !handleMetricWithAtomMatchingTrackers(
- simplePredicate.stop(), metricIndex, metric.has_dimensions_in_what(),
- allAtomMatchingTrackers, atomMatchingTrackerMap, trackerToMetricMap, stopIndex)) {
- return nullopt;
+ if (simplePredicate.has_stop()) {
+ invalidConfigReason = handleMetricWithAtomMatchingTrackers(
+ simplePredicate.stop(), metric.id(), metricIndex, metric.has_dimensions_in_what(),
+ allAtomMatchingTrackers, atomMatchingTrackerMap, trackerToMetricMap, stopIndex);
+ if (invalidConfigReason.has_value()) {
+ return nullopt;
+ }
}
- if (simplePredicate.has_stop_all() &&
- !handleMetricWithAtomMatchingTrackers(simplePredicate.stop_all(), metricIndex,
- metric.has_dimensions_in_what(),
- allAtomMatchingTrackers, atomMatchingTrackerMap,
- trackerToMetricMap, stopAllIndex)) {
- return nullopt;
+ if (simplePredicate.has_stop_all()) {
+ invalidConfigReason = handleMetricWithAtomMatchingTrackers(
+ simplePredicate.stop_all(), metric.id(), metricIndex,
+ metric.has_dimensions_in_what(), allAtomMatchingTrackers, atomMatchingTrackerMap,
+ trackerToMetricMap, stopAllIndex);
+ if (invalidConfigReason.has_value()) {
+ return nullopt;
+ }
}
FieldMatcher internalDimensions = simplePredicate.dimensions();
int conditionIndex = -1;
if (metric.has_condition()) {
- if (!handleMetricWithConditions(metric.condition(), metricIndex, conditionTrackerMap,
- metric.links(), allConditionTrackers, conditionIndex,
- conditionToMetricMap)) {
+ invalidConfigReason = handleMetricWithConditions(
+ metric.condition(), metric.id(), metricIndex, conditionTrackerMap, metric.links(),
+ allConditionTrackers, conditionIndex, conditionToMetricMap);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
} else if (metric.links_size() > 0) {
ALOGW("metrics has a MetricConditionLink but doesn't have a condition");
+ invalidConfigReason = InvalidConfigReason(
+ INVALID_CONFIG_REASON_METRIC_CONDITIONLINK_NO_CONDITION, metric.id());
return nullopt;
}
@@ -512,14 +642,21 @@
if (metric.slice_by_state_size() > 0) {
if (metric.aggregation_type() == DurationMetric::MAX_SPARSE) {
ALOGE("DurationMetric with aggregation type MAX_SPARSE cannot be sliced by state");
+ invalidConfigReason = InvalidConfigReason(
+ INVALID_CONFIG_REASON_DURATION_METRIC_MAX_SPARSE_HAS_SLICE_BY_STATE,
+ metric.id());
return nullopt;
}
- if (!handleMetricWithStates(config, metric.slice_by_state(), stateAtomIdMap,
- allStateGroupMaps, slicedStateAtoms, stateGroupMap)) {
+ invalidConfigReason =
+ handleMetricWithStates(config, metric.id(), metric.slice_by_state(), stateAtomIdMap,
+ allStateGroupMaps, slicedStateAtoms, stateGroupMap);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
} else if (metric.state_link_size() > 0) {
ALOGW("DurationMetric has a MetricStateLink but doesn't have a sliced state");
+ invalidConfigReason =
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_STATELINK_NO_STATE, metric.id());
return nullopt;
}
@@ -527,7 +664,9 @@
std::vector<Matcher> dimensionsInWhat;
translateFieldMatcher(metric.dimensions_in_what(), &dimensionsInWhat);
for (const auto& stateLink : metric.state_link()) {
- if (!handleMetricWithStateLink(stateLink.fields_in_what(), dimensionsInWhat)) {
+ invalidConfigReason = handleMetricWithStateLink(metric.id(), stateLink.fields_in_what(),
+ dimensionsInWhat);
+ if (invalidConfigReason.has_value()) {
ALOGW("DurationMetric's MetricStateLinks must be a subset of dimensions in what");
return nullopt;
}
@@ -535,15 +674,18 @@
unordered_map<int, shared_ptr<Activation>> eventActivationMap;
unordered_map<int, vector<shared_ptr<Activation>>> eventDeactivationMap;
- if (!handleMetricActivation(config, metric.id(), metricIndex, metricToActivationMap,
- atomMatchingTrackerMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, metricsWithActivation,
- eventActivationMap, eventDeactivationMap)) {
+ invalidConfigReason = handleMetricActivation(
+ config, metric.id(), metricIndex, metricToActivationMap, atomMatchingTrackerMap,
+ activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
+ metricsWithActivation, eventActivationMap, eventDeactivationMap);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
uint64_t metricHash;
- if (!getMetricProtoHash(config, metric, metric.id(), metricToActivationMap, metricHash)) {
+ invalidConfigReason =
+ getMetricProtoHash(config, metric, metric.id(), metricToActivationMap, metricHash);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
@@ -556,20 +698,35 @@
break;
default:
ALOGE("Duration metric incorrect upload threshold type or no type used");
+ invalidConfigReason = InvalidConfigReason(
+ INVALID_CONFIG_REASON_METRIC_BAD_THRESHOLD, metric.id());
return nullopt;
- break;
}
}
- sp<MetricProducer> producer = new DurationMetricProducer(
+ sp<MetricProducer> metricProducer = new DurationMetricProducer(
key, metric, conditionIndex, initialConditionCache, whatIndex, startIndex, stopIndex,
stopAllIndex, nesting, wizard, metricHash, internalDimensions, timeBaseNs,
currentTimeNs, eventActivationMap, eventDeactivationMap, slicedStateAtoms,
stateGroupMap);
- if (!producer->isValid()) {
+ if (!metricProducer->isValid()) {
+ // TODO: Remove once invalidConfigReason is added to the DurationMetricProducer constructor
+ invalidConfigReason = InvalidConfigReason(
+ INVALID_CONFIG_REASON_DURATION_METRIC_PRODUCER_INVALID, metric.id());
return nullopt;
}
- return {producer};
+
+ SamplingInfo samplingInfo;
+ if (metric.has_dimensional_sampling_info()) {
+ invalidConfigReason = handleMetricWithSampling(
+ metric.id(), metric.dimensional_sampling_info(), dimensionsInWhat, samplingInfo);
+ if (invalidConfigReason.has_value()) {
+ return nullopt;
+ }
+ metricProducer->setSamplingInfo(samplingInfo);
+ }
+
+ return metricProducer;
}
optional<sp<MetricProducer>> createEventMetricProducerAndUpdateMetadata(
@@ -585,45 +742,58 @@
unordered_map<int, vector<int>>& conditionToMetricMap,
unordered_map<int, vector<int>>& activationAtomTrackerToMetricMap,
unordered_map<int, vector<int>>& deactivationAtomTrackerToMetricMap,
- vector<int>& metricsWithActivation) {
+ vector<int>& metricsWithActivation, optional<InvalidConfigReason>& invalidConfigReason) {
if (!metric.has_id() || !metric.has_what()) {
ALOGE("cannot find the metric name or what in config");
+ invalidConfigReason =
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_MISSING_ID_OR_WHAT, metric.id());
return nullopt;
}
int trackerIndex;
- if (!handleMetricWithAtomMatchingTrackers(metric.what(), metricIndex, false,
- allAtomMatchingTrackers, atomMatchingTrackerMap,
- trackerToMetricMap, trackerIndex)) {
+ invalidConfigReason = handleMetricWithAtomMatchingTrackers(
+ metric.what(), metric.id(), metricIndex, false, allAtomMatchingTrackers,
+ atomMatchingTrackerMap, trackerToMetricMap, trackerIndex);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
int conditionIndex = -1;
if (metric.has_condition()) {
- if (!handleMetricWithConditions(metric.condition(), metricIndex, conditionTrackerMap,
- metric.links(), allConditionTrackers, conditionIndex,
- conditionToMetricMap)) {
+ invalidConfigReason = handleMetricWithConditions(
+ metric.condition(), metric.id(), metricIndex, conditionTrackerMap, metric.links(),
+ allConditionTrackers, conditionIndex, conditionToMetricMap);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
} else {
if (metric.links_size() > 0) {
ALOGW("metrics has a MetricConditionLink but doesn't have a condition");
+ invalidConfigReason = InvalidConfigReason(
+ INVALID_CONFIG_REASON_METRIC_CONDITIONLINK_NO_CONDITION, metric.id());
return nullopt;
}
}
unordered_map<int, shared_ptr<Activation>> eventActivationMap;
unordered_map<int, vector<shared_ptr<Activation>>> eventDeactivationMap;
- bool success = handleMetricActivation(config, metric.id(), metricIndex, metricToActivationMap,
- atomMatchingTrackerMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, metricsWithActivation,
- eventActivationMap, eventDeactivationMap);
- if (!success) return nullptr;
+ invalidConfigReason = handleMetricActivation(
+ config, metric.id(), metricIndex, metricToActivationMap, atomMatchingTrackerMap,
+ activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
+ metricsWithActivation, eventActivationMap, eventDeactivationMap);
+    if (invalidConfigReason.has_value()) return nullopt;
uint64_t metricHash;
- if (!getMetricProtoHash(config, metric, metric.id(), metricToActivationMap, metricHash)) {
+ invalidConfigReason =
+ getMetricProtoHash(config, metric, metric.id(), metricToActivationMap, metricHash);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
+ if (config.has_restricted_metrics_delegate_package_name()) {
+ return {new RestrictedEventMetricProducer(
+ key, metric, conditionIndex, initialConditionCache, wizard, metricHash, timeBaseNs,
+ eventActivationMap, eventDeactivationMap)};
+ }
return {new EventMetricProducer(key, metric, conditionIndex, initialConditionCache, wizard,
metricHash, timeBaseNs, eventActivationMap,
eventDeactivationMap)};
@@ -646,32 +816,41 @@
unordered_map<int, vector<int>>& conditionToMetricMap,
unordered_map<int, vector<int>>& activationAtomTrackerToMetricMap,
unordered_map<int, vector<int>>& deactivationAtomTrackerToMetricMap,
- vector<int>& metricsWithActivation) {
+ vector<int>& metricsWithActivation, optional<InvalidConfigReason>& invalidConfigReason) {
if (!metric.has_id() || !metric.has_what()) {
ALOGE("cannot find metric id or \"what\" in ValueMetric \"%lld\"", (long long)metric.id());
+ invalidConfigReason =
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_MISSING_ID_OR_WHAT, metric.id());
return nullopt;
}
if (!metric.has_value_field()) {
ALOGE("cannot find \"value_field\" in ValueMetric \"%lld\"", (long long)metric.id());
+ invalidConfigReason = InvalidConfigReason(
+ INVALID_CONFIG_REASON_VALUE_METRIC_MISSING_VALUE_FIELD, metric.id());
return nullopt;
}
if (HasPositionALL(metric.value_field())) {
ALOGE("value field with position ALL is not supported. ValueMetric \"%lld\"",
(long long)metric.id());
+ invalidConfigReason = InvalidConfigReason(
+ INVALID_CONFIG_REASON_VALUE_METRIC_VALUE_FIELD_HAS_POSITION_ALL, metric.id());
return nullopt;
}
std::vector<Matcher> fieldMatchers;
translateFieldMatcher(metric.value_field(), &fieldMatchers);
if (fieldMatchers.size() < 1) {
ALOGE("incorrect \"value_field\" in ValueMetric \"%lld\"", (long long)metric.id());
+ invalidConfigReason = InvalidConfigReason(
+ INVALID_CONFIG_REASON_VALUE_METRIC_HAS_INCORRECT_VALUE_FIELD, metric.id());
return nullopt;
}
int trackerIndex;
- if (!handleMetricWithAtomMatchingTrackers(metric.what(), metricIndex,
- /*enforceOneAtom=*/true, allAtomMatchingTrackers,
- atomMatchingTrackerMap, trackerToMetricMap,
- trackerIndex)) {
+ invalidConfigReason = handleMetricWithAtomMatchingTrackers(
+ metric.what(), metric.id(), metricIndex,
+ /*enforceOneAtom=*/true, allAtomMatchingTrackers, atomMatchingTrackerMap,
+ trackerToMetricMap, trackerIndex);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
@@ -681,25 +860,32 @@
int conditionIndex = -1;
if (metric.has_condition()) {
- if (!handleMetricWithConditions(metric.condition(), metricIndex, conditionTrackerMap,
- metric.links(), allConditionTrackers, conditionIndex,
- conditionToMetricMap)) {
+ invalidConfigReason = handleMetricWithConditions(
+ metric.condition(), metric.id(), metricIndex, conditionTrackerMap, metric.links(),
+ allConditionTrackers, conditionIndex, conditionToMetricMap);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
} else if (metric.links_size() > 0) {
ALOGE("metrics has a MetricConditionLink but doesn't have a condition");
+ invalidConfigReason = InvalidConfigReason(
+ INVALID_CONFIG_REASON_METRIC_CONDITIONLINK_NO_CONDITION, metric.id());
return nullopt;
}
std::vector<int> slicedStateAtoms;
unordered_map<int, unordered_map<int, int64_t>> stateGroupMap;
if (metric.slice_by_state_size() > 0) {
- if (!handleMetricWithStates(config, metric.slice_by_state(), stateAtomIdMap,
- allStateGroupMaps, slicedStateAtoms, stateGroupMap)) {
+ invalidConfigReason =
+ handleMetricWithStates(config, metric.id(), metric.slice_by_state(), stateAtomIdMap,
+ allStateGroupMaps, slicedStateAtoms, stateGroupMap);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
} else if (metric.state_link_size() > 0) {
ALOGE("ValueMetric has a MetricStateLink but doesn't have a sliced state");
+ invalidConfigReason =
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_STATELINK_NO_STATE, metric.id());
return nullopt;
}
@@ -707,7 +893,9 @@
std::vector<Matcher> dimensionsInWhat;
translateFieldMatcher(metric.dimensions_in_what(), &dimensionsInWhat);
for (const auto& stateLink : metric.state_link()) {
- if (!handleMetricWithStateLink(stateLink.fields_in_what(), dimensionsInWhat)) {
+ invalidConfigReason = handleMetricWithStateLink(metric.id(), stateLink.fields_in_what(),
+ dimensionsInWhat);
+ if (invalidConfigReason.has_value()) {
ALOGW("ValueMetric's MetricStateLinks must be a subset of the dimensions in what");
return nullopt;
}
@@ -715,15 +903,18 @@
unordered_map<int, shared_ptr<Activation>> eventActivationMap;
unordered_map<int, vector<shared_ptr<Activation>>> eventDeactivationMap;
- if (!handleMetricActivation(config, metric.id(), metricIndex, metricToActivationMap,
- atomMatchingTrackerMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, metricsWithActivation,
- eventActivationMap, eventDeactivationMap)) {
+ invalidConfigReason = handleMetricActivation(
+ config, metric.id(), metricIndex, metricToActivationMap, atomMatchingTrackerMap,
+ activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
+ metricsWithActivation, eventActivationMap, eventDeactivationMap);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
uint64_t metricHash;
- if (!getMetricProtoHash(config, metric, metric.id(), metricToActivationMap, metricHash)) {
+ invalidConfigReason =
+ getMetricProtoHash(config, metric, metric.id(), metricToActivationMap, metricHash);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
@@ -744,7 +935,7 @@
? optional<int64_t>(metric.condition_correction_threshold_nanos())
: nullopt;
- return new NumericValueMetricProducer(
+ sp<MetricProducer> metricProducer = new NumericValueMetricProducer(
key, metric, metricHash, {pullTagId, pullerManager},
{timeBaseNs, currentTimeNs, bucketSizeNs, metric.min_bucket_size_nanos(),
conditionCorrectionThresholdNs, getAppUpgradeBucketSplit(metric)},
@@ -753,6 +944,18 @@
{conditionIndex, metric.links(), initialConditionCache, wizard},
{metric.state_link(), slicedStateAtoms, stateGroupMap},
{eventActivationMap, eventDeactivationMap}, {dimensionSoftLimit, dimensionHardLimit});
+
+ SamplingInfo samplingInfo;
+ if (metric.has_dimensional_sampling_info()) {
+ invalidConfigReason = handleMetricWithSampling(
+ metric.id(), metric.dimensional_sampling_info(), dimensionsInWhat, samplingInfo);
+ if (invalidConfigReason.has_value()) {
+ return nullopt;
+ }
+ metricProducer->setSamplingInfo(samplingInfo);
+ }
+
+ return metricProducer;
}
optional<sp<MetricProducer>> createKllMetricProducerAndUpdateMetadata(
@@ -772,56 +975,72 @@
unordered_map<int, vector<int>>& conditionToMetricMap,
unordered_map<int, vector<int>>& activationAtomTrackerToMetricMap,
unordered_map<int, vector<int>>& deactivationAtomTrackerToMetricMap,
- vector<int>& metricsWithActivation) {
+ vector<int>& metricsWithActivation, optional<InvalidConfigReason>& invalidConfigReason) {
if (!metric.has_id() || !metric.has_what()) {
ALOGE("cannot find metric id or \"what\" in KllMetric \"%lld\"", (long long)metric.id());
+ invalidConfigReason =
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_MISSING_ID_OR_WHAT, metric.id());
return nullopt;
}
if (!metric.has_kll_field()) {
ALOGE("cannot find \"kll_field\" in KllMetric \"%lld\"", (long long)metric.id());
+ invalidConfigReason = InvalidConfigReason(
+ INVALID_CONFIG_REASON_KLL_METRIC_MISSING_KLL_FIELD, metric.id());
return nullopt;
}
if (HasPositionALL(metric.kll_field())) {
ALOGE("kll field with position ALL is not supported. KllMetric \"%lld\"",
(long long)metric.id());
+ invalidConfigReason = InvalidConfigReason(
+ INVALID_CONFIG_REASON_KLL_METRIC_KLL_FIELD_HAS_POSITION_ALL, metric.id());
return nullopt;
}
std::vector<Matcher> fieldMatchers;
translateFieldMatcher(metric.kll_field(), &fieldMatchers);
if (fieldMatchers.empty()) {
ALOGE("incorrect \"kll_field\" in KllMetric \"%lld\"", (long long)metric.id());
+ invalidConfigReason = InvalidConfigReason(
+ INVALID_CONFIG_REASON_KLL_METRIC_HAS_INCORRECT_KLL_FIELD, metric.id());
return nullopt;
}
int trackerIndex;
- if (!handleMetricWithAtomMatchingTrackers(metric.what(), metricIndex,
- /*enforceOneAtom=*/true, allAtomMatchingTrackers,
- atomMatchingTrackerMap, trackerToMetricMap,
- trackerIndex)) {
+ invalidConfigReason = handleMetricWithAtomMatchingTrackers(
+ metric.what(), metric.id(), metricIndex,
+ /*enforceOneAtom=*/true, allAtomMatchingTrackers, atomMatchingTrackerMap,
+ trackerToMetricMap, trackerIndex);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
int conditionIndex = -1;
if (metric.has_condition()) {
- if (!handleMetricWithConditions(metric.condition(), metricIndex, conditionTrackerMap,
- metric.links(), allConditionTrackers, conditionIndex,
- conditionToMetricMap)) {
+ invalidConfigReason = handleMetricWithConditions(
+ metric.condition(), metric.id(), metricIndex, conditionTrackerMap, metric.links(),
+ allConditionTrackers, conditionIndex, conditionToMetricMap);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
} else if (metric.links_size() > 0) {
ALOGE("metrics has a MetricConditionLink but doesn't have a condition");
+ invalidConfigReason = InvalidConfigReason(
+ INVALID_CONFIG_REASON_METRIC_CONDITIONLINK_NO_CONDITION, metric.id());
return nullopt;
}
std::vector<int> slicedStateAtoms;
unordered_map<int, unordered_map<int, int64_t>> stateGroupMap;
if (metric.slice_by_state_size() > 0) {
- if (!handleMetricWithStates(config, metric.slice_by_state(), stateAtomIdMap,
- allStateGroupMaps, slicedStateAtoms, stateGroupMap)) {
+ invalidConfigReason =
+ handleMetricWithStates(config, metric.id(), metric.slice_by_state(), stateAtomIdMap,
+ allStateGroupMaps, slicedStateAtoms, stateGroupMap);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
} else if (metric.state_link_size() > 0) {
ALOGE("KllMetric has a MetricStateLink but doesn't have a sliced state");
+ invalidConfigReason =
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_STATELINK_NO_STATE, metric.id());
return nullopt;
}
@@ -829,7 +1048,9 @@
std::vector<Matcher> dimensionsInWhat;
translateFieldMatcher(metric.dimensions_in_what(), &dimensionsInWhat);
for (const auto& stateLink : metric.state_link()) {
- if (!handleMetricWithStateLink(stateLink.fields_in_what(), dimensionsInWhat)) {
+ invalidConfigReason = handleMetricWithStateLink(metric.id(), stateLink.fields_in_what(),
+ dimensionsInWhat);
+ if (invalidConfigReason.has_value()) {
ALOGW("KllMetric's MetricStateLinks must be a subset of the dimensions in what");
return nullopt;
}
@@ -837,15 +1058,18 @@
unordered_map<int, shared_ptr<Activation>> eventActivationMap;
unordered_map<int, vector<shared_ptr<Activation>>> eventDeactivationMap;
- if (!handleMetricActivation(config, metric.id(), metricIndex, metricToActivationMap,
- atomMatchingTrackerMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, metricsWithActivation,
- eventActivationMap, eventDeactivationMap)) {
+ invalidConfigReason = handleMetricActivation(
+ config, metric.id(), metricIndex, metricToActivationMap, atomMatchingTrackerMap,
+ activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
+ metricsWithActivation, eventActivationMap, eventDeactivationMap);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
uint64_t metricHash;
- if (!getMetricProtoHash(config, metric, metric.id(), metricToActivationMap, metricHash)) {
+ invalidConfigReason =
+ getMetricProtoHash(config, metric, metric.id(), metricToActivationMap, metricHash);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
@@ -862,7 +1086,7 @@
const auto [dimensionSoftLimit, dimensionHardLimit] =
StatsdStats::getAtomDimensionKeySizeLimits(atomTagId);
- return new KllMetricProducer(
+ sp<MetricProducer> metricProducer = new KllMetricProducer(
key, metric, metricHash, {/*pullTagId=*/-1, pullerManager},
{timeBaseNs, currentTimeNs, bucketSizeNs, metric.min_bucket_size_nanos(),
/*conditionCorrectionThresholdNs=*/nullopt, getAppUpgradeBucketSplit(metric)},
@@ -871,6 +1095,18 @@
{conditionIndex, metric.links(), initialConditionCache, wizard},
{metric.state_link(), slicedStateAtoms, stateGroupMap},
{eventActivationMap, eventDeactivationMap}, {dimensionSoftLimit, dimensionHardLimit});
+
+ SamplingInfo samplingInfo;
+ if (metric.has_dimensional_sampling_info()) {
+ invalidConfigReason = handleMetricWithSampling(
+ metric.id(), metric.dimensional_sampling_info(), dimensionsInWhat, samplingInfo);
+ if (invalidConfigReason.has_value()) {
+ return nullopt;
+ }
+ metricProducer->setSamplingInfo(samplingInfo);
+ }
+
+ return metricProducer;
}
optional<sp<MetricProducer>> createGaugeMetricProducerAndUpdateMetadata(
@@ -888,9 +1124,11 @@
unordered_map<int, vector<int>>& conditionToMetricMap,
unordered_map<int, vector<int>>& activationAtomTrackerToMetricMap,
unordered_map<int, vector<int>>& deactivationAtomTrackerToMetricMap,
- vector<int>& metricsWithActivation) {
+ vector<int>& metricsWithActivation, optional<InvalidConfigReason>& invalidConfigReason) {
if (!metric.has_id() || !metric.has_what()) {
ALOGE("cannot find metric id or \"what\" in GaugeMetric \"%lld\"", (long long)metric.id());
+ invalidConfigReason =
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_MISSING_ID_OR_WHAT, metric.id());
return nullopt;
}
@@ -898,28 +1136,28 @@
(metric.gauge_fields_filter().include_all() == false)) &&
!hasLeafNode(metric.gauge_fields_filter().fields())) {
ALOGW("Incorrect field filter setting in GaugeMetric %lld", (long long)metric.id());
+ invalidConfigReason = InvalidConfigReason(
+ INVALID_CONFIG_REASON_GAUGE_METRIC_INCORRECT_FIELD_FILTER, metric.id());
return nullopt;
}
if ((metric.gauge_fields_filter().has_include_all() &&
metric.gauge_fields_filter().include_all() == true) &&
hasLeafNode(metric.gauge_fields_filter().fields())) {
ALOGW("Incorrect field filter setting in GaugeMetric %lld", (long long)metric.id());
+ invalidConfigReason = InvalidConfigReason(
+ INVALID_CONFIG_REASON_GAUGE_METRIC_INCORRECT_FIELD_FILTER, metric.id());
return nullopt;
}
int trackerIndex;
- if (!handleMetricWithAtomMatchingTrackers(metric.what(), metricIndex,
- metric.has_dimensions_in_what(),
- allAtomMatchingTrackers, atomMatchingTrackerMap,
- trackerToMetricMap, trackerIndex)) {
+ invalidConfigReason = handleMetricWithAtomMatchingTrackers(
+ metric.what(), metric.id(), metricIndex, true, allAtomMatchingTrackers,
+ atomMatchingTrackerMap, trackerToMetricMap, trackerIndex);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
sp<AtomMatchingTracker> atomMatcher = allAtomMatchingTrackers.at(trackerIndex);
- // For GaugeMetric atom, it should be simple matcher with one tagId.
- if (atomMatcher->getAtomIds().size() != 1) {
- return nullopt;
- }
int atomTagId = *(atomMatcher->getAtomIds().begin());
int pullTagId = pullerManager->PullerForMatcherExists(atomTagId) ? atomTagId : -1;
@@ -928,17 +1166,22 @@
if (metric.has_trigger_event()) {
if (pullTagId == -1) {
ALOGW("Pull atom not specified for trigger");
+ invalidConfigReason = InvalidConfigReason(
+ INVALID_CONFIG_REASON_GAUGE_METRIC_TRIGGER_NO_PULL_ATOM, metric.id());
return nullopt;
}
// trigger_event should be used with FIRST_N_SAMPLES
if (metric.sampling_type() != GaugeMetric::FIRST_N_SAMPLES) {
ALOGW("Gauge Metric with trigger event must have sampling type FIRST_N_SAMPLES");
+ invalidConfigReason = InvalidConfigReason(
+ INVALID_CONFIG_REASON_GAUGE_METRIC_TRIGGER_NO_FIRST_N_SAMPLES, metric.id());
return nullopt;
}
- if (!handleMetricWithAtomMatchingTrackers(metric.trigger_event(), metricIndex,
- /*enforceOneAtom=*/true, allAtomMatchingTrackers,
- atomMatchingTrackerMap, trackerToMetricMap,
- triggerTrackerIndex)) {
+ invalidConfigReason = handleMetricWithAtomMatchingTrackers(
+ metric.trigger_event(), metric.id(), metricIndex,
+ /*enforceOneAtom=*/true, allAtomMatchingTrackers, atomMatchingTrackerMap,
+ trackerToMetricMap, triggerTrackerIndex);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
sp<AtomMatchingTracker> triggerAtomMatcher =
@@ -946,68 +1189,89 @@
triggerAtomId = *(triggerAtomMatcher->getAtomIds().begin());
}
- if (!metric.has_trigger_event() && pullTagId != -1 &&
- metric.sampling_type() == GaugeMetric::FIRST_N_SAMPLES) {
- ALOGW("FIRST_N_SAMPLES is only for pushed event or pull_on_trigger");
- return nullopt;
- }
-
int conditionIndex = -1;
if (metric.has_condition()) {
- if (!handleMetricWithConditions(metric.condition(), metricIndex, conditionTrackerMap,
- metric.links(), allConditionTrackers, conditionIndex,
- conditionToMetricMap)) {
+ invalidConfigReason = handleMetricWithConditions(
+ metric.condition(), metric.id(), metricIndex, conditionTrackerMap, metric.links(),
+ allConditionTrackers, conditionIndex, conditionToMetricMap);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
} else {
if (metric.links_size() > 0) {
ALOGW("metrics has a MetricConditionLink but doesn't have a condition");
+ invalidConfigReason = InvalidConfigReason(
+ INVALID_CONFIG_REASON_METRIC_CONDITIONLINK_NO_CONDITION, metric.id());
return nullopt;
}
}
unordered_map<int, shared_ptr<Activation>> eventActivationMap;
unordered_map<int, vector<shared_ptr<Activation>>> eventDeactivationMap;
- if (!handleMetricActivation(config, metric.id(), metricIndex, metricToActivationMap,
- atomMatchingTrackerMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, metricsWithActivation,
- eventActivationMap, eventDeactivationMap)) {
+ invalidConfigReason = handleMetricActivation(
+ config, metric.id(), metricIndex, metricToActivationMap, atomMatchingTrackerMap,
+ activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
+ metricsWithActivation, eventActivationMap, eventDeactivationMap);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
uint64_t metricHash;
- if (!getMetricProtoHash(config, metric, metric.id(), metricToActivationMap, metricHash)) {
+ invalidConfigReason =
+ getMetricProtoHash(config, metric, metric.id(), metricToActivationMap, metricHash);
+ if (invalidConfigReason.has_value()) {
return nullopt;
}
const auto [dimensionSoftLimit, dimensionHardLimit] =
StatsdStats::getAtomDimensionKeySizeLimits(pullTagId);
- return {new GaugeMetricProducer(key, metric, conditionIndex, initialConditionCache, wizard,
- metricHash, trackerIndex, matcherWizard, pullTagId,
- triggerAtomId, atomTagId, timeBaseNs, currentTimeNs,
- pullerManager, eventActivationMap, eventDeactivationMap,
- dimensionSoftLimit, dimensionHardLimit)};
+ sp<MetricProducer> metricProducer = new GaugeMetricProducer(
+ key, metric, conditionIndex, initialConditionCache, wizard, metricHash, trackerIndex,
+ matcherWizard, pullTagId, triggerAtomId, atomTagId, timeBaseNs, currentTimeNs,
+ pullerManager, eventActivationMap, eventDeactivationMap, dimensionSoftLimit,
+ dimensionHardLimit);
+
+ SamplingInfo samplingInfo;
+ std::vector<Matcher> dimensionsInWhat;
+ translateFieldMatcher(metric.dimensions_in_what(), &dimensionsInWhat);
+ if (metric.has_dimensional_sampling_info()) {
+ invalidConfigReason = handleMetricWithSampling(
+ metric.id(), metric.dimensional_sampling_info(), dimensionsInWhat, samplingInfo);
+ if (invalidConfigReason.has_value()) {
+ return nullopt;
+ }
+ metricProducer->setSamplingInfo(samplingInfo);
+ }
+
+ return metricProducer;
}
optional<sp<AnomalyTracker>> createAnomalyTracker(
const Alert& alert, const sp<AlarmMonitor>& anomalyAlarmMonitor,
const UpdateStatus& updateStatus, const int64_t currentTimeNs,
const unordered_map<int64_t, int>& metricProducerMap,
- vector<sp<MetricProducer>>& allMetricProducers) {
+ vector<sp<MetricProducer>>& allMetricProducers,
+ optional<InvalidConfigReason>& invalidConfigReason) {
const auto& itr = metricProducerMap.find(alert.metric_id());
if (itr == metricProducerMap.end()) {
ALOGW("alert \"%lld\" has unknown metric id: \"%lld\"", (long long)alert.id(),
(long long)alert.metric_id());
+ invalidConfigReason = createInvalidConfigReasonWithAlert(
+ INVALID_CONFIG_REASON_ALERT_METRIC_NOT_FOUND, alert.metric_id(), alert.id());
return nullopt;
}
if (!alert.has_trigger_if_sum_gt()) {
ALOGW("invalid alert: missing threshold");
+ invalidConfigReason = createInvalidConfigReasonWithAlert(
+ INVALID_CONFIG_REASON_ALERT_THRESHOLD_MISSING, alert.id());
return nullopt;
}
if (alert.trigger_if_sum_gt() < 0 || alert.num_buckets() <= 0) {
ALOGW("invalid alert: threshold=%f num_buckets= %d", alert.trigger_if_sum_gt(),
alert.num_buckets());
+ invalidConfigReason = createInvalidConfigReasonWithAlert(
+ INVALID_CONFIG_REASON_ALERT_INVALID_TRIGGER_OR_NUM_BUCKETS, alert.id());
return nullopt;
}
const int metricIndex = itr->second;
@@ -1016,71 +1280,97 @@
metric->addAnomalyTracker(alert, anomalyAlarmMonitor, updateStatus, currentTimeNs);
if (anomalyTracker == nullptr) {
// The ALOGW for this invalid alert was already displayed in addAnomalyTracker().
+ invalidConfigReason = createInvalidConfigReasonWithAlert(
+ INVALID_CONFIG_REASON_ALERT_CANNOT_ADD_ANOMALY, alert.metric_id(), alert.id());
return nullopt;
}
return {anomalyTracker};
}
-bool initAtomMatchingTrackers(const StatsdConfig& config, const sp<UidMap>& uidMap,
- unordered_map<int64_t, int>& atomMatchingTrackerMap,
- vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
- set<int>& allTagIds) {
+optional<InvalidConfigReason> initAtomMatchingTrackers(
+ const StatsdConfig& config, const sp<UidMap>& uidMap,
+ unordered_map<int64_t, int>& atomMatchingTrackerMap,
+ vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
+ unordered_map<int, vector<int>>& allTagIdsToMatchersMap) {
vector<AtomMatcher> matcherConfigs;
const int atomMatcherCount = config.atom_matcher_size();
matcherConfigs.reserve(atomMatcherCount);
allAtomMatchingTrackers.reserve(atomMatcherCount);
+ optional<InvalidConfigReason> invalidConfigReason;
for (int i = 0; i < atomMatcherCount; i++) {
const AtomMatcher& logMatcher = config.atom_matcher(i);
- sp<AtomMatchingTracker> tracker = createAtomMatchingTracker(logMatcher, i, uidMap);
+ sp<AtomMatchingTracker> tracker =
+ createAtomMatchingTracker(logMatcher, i, uidMap, invalidConfigReason);
if (tracker == nullptr) {
- return false;
+ return invalidConfigReason;
}
allAtomMatchingTrackers.push_back(tracker);
if (atomMatchingTrackerMap.find(logMatcher.id()) != atomMatchingTrackerMap.end()) {
ALOGE("Duplicate AtomMatcher found!");
- return false;
+ return createInvalidConfigReasonWithMatcher(INVALID_CONFIG_REASON_MATCHER_DUPLICATE,
+ logMatcher.id());
}
atomMatchingTrackerMap[logMatcher.id()] = i;
matcherConfigs.push_back(logMatcher);
}
vector<bool> stackTracker2(allAtomMatchingTrackers.size(), false);
- for (auto& matcher : allAtomMatchingTrackers) {
- if (!matcher->init(matcherConfigs, allAtomMatchingTrackers, atomMatchingTrackerMap,
- stackTracker2)) {
- return false;
+ for (size_t matcherIndex = 0; matcherIndex < allAtomMatchingTrackers.size(); matcherIndex++) {
+ auto& matcher = allAtomMatchingTrackers[matcherIndex];
+ invalidConfigReason = matcher->init(matcherConfigs, allAtomMatchingTrackers,
+ atomMatchingTrackerMap, stackTracker2);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
+
// Collect all the tag ids that are interesting. TagIds exist in leaf nodes only.
const set<int>& tagIds = matcher->getAtomIds();
- allTagIds.insert(tagIds.begin(), tagIds.end());
+ for (int atomId : tagIds) {
+ auto& matchers = allTagIdsToMatchersMap[atomId];
+ // Performance note:
+            // For a small number of elements, linear search in a vector is
+            // faster than a lookup in a set:
+            // - we do not expect the matchers vector per atom id to grow large (< 10 entries)
+            // - iterating a vector is fastest compared to other containers (set, etc.)
+            //   in the hot path MetricsManager::onLogEvent()
+            // - vector<T> has the smallest memory footprint compared to other
+            //   std container implementations
+ if (find(matchers.begin(), matchers.end(), matcherIndex) == matchers.end()) {
+ matchers.push_back(matcherIndex);
+ }
+ }
}
- return true;
+
+ return nullopt;
}
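
initAtomMatchingTrackers now builds allTagIdsToMatchersMap instead of a flat set of tag ids, so the event path can go straight from an atom id to the few matcher indices that care about it. A minimal sketch of that lookup is below; the atom ids and matcher indices are placeholders, and the MetricsManager::onLogEvent wiring itself is not part of this hunk:

#include <cstdio>
#include <unordered_map>
#include <vector>

int main() {
    // Hypothetical contents of allTagIdsToMatchersMap after config parsing:
    // atom id -> indices of the matchers interested in that atom.
    std::unordered_map<int, std::vector<int>> tagIdToMatchers = {
            {29 /* hypothetical pushed atom id */, {0, 3}},
            {10022 /* hypothetical pulled atom id */, {1}},
    };

    const int incomingAtomId = 29;
    const auto it = tagIdToMatchers.find(incomingAtomId);
    if (it == tagIdToMatchers.end()) {
        return 0;  // no matcher is interested in this atom; drop the event early
    }
    for (const int matcherIndex : it->second) {
        // In statsd this is where each interested AtomMatchingTracker is evaluated.
        std::printf("evaluate matcher index %d\n", matcherIndex);
    }
    return 0;
}
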
-bool initConditions(const ConfigKey& key, const StatsdConfig& config,
- const unordered_map<int64_t, int>& atomMatchingTrackerMap,
- unordered_map<int64_t, int>& conditionTrackerMap,
- vector<sp<ConditionTracker>>& allConditionTrackers,
- unordered_map<int, std::vector<int>>& trackerToConditionMap,
- vector<ConditionState>& initialConditionCache) {
+optional<InvalidConfigReason> initConditions(
+ const ConfigKey& key, const StatsdConfig& config,
+ const unordered_map<int64_t, int>& atomMatchingTrackerMap,
+ unordered_map<int64_t, int>& conditionTrackerMap,
+ vector<sp<ConditionTracker>>& allConditionTrackers,
+ unordered_map<int, std::vector<int>>& trackerToConditionMap,
+ vector<ConditionState>& initialConditionCache) {
vector<Predicate> conditionConfigs;
const int conditionTrackerCount = config.predicate_size();
conditionConfigs.reserve(conditionTrackerCount);
allConditionTrackers.reserve(conditionTrackerCount);
initialConditionCache.assign(conditionTrackerCount, ConditionState::kNotEvaluated);
+ optional<InvalidConfigReason> invalidConfigReason;
for (int i = 0; i < conditionTrackerCount; i++) {
const Predicate& condition = config.predicate(i);
- sp<ConditionTracker> tracker =
- createConditionTracker(key, condition, i, atomMatchingTrackerMap);
+ sp<ConditionTracker> tracker = createConditionTracker(
+ key, condition, i, atomMatchingTrackerMap, invalidConfigReason);
if (tracker == nullptr) {
- return false;
+ return invalidConfigReason;
}
allConditionTrackers.push_back(tracker);
if (conditionTrackerMap.find(condition.id()) != conditionTrackerMap.end()) {
ALOGE("Duplicate Predicate found!");
- return false;
+ return createInvalidConfigReasonWithPredicate(INVALID_CONFIG_REASON_CONDITION_DUPLICATE,
+ condition.id());
}
conditionTrackerMap[condition.id()] = i;
conditionConfigs.push_back(condition);
@@ -1089,21 +1379,24 @@
vector<bool> stackTracker(allConditionTrackers.size(), false);
for (size_t i = 0; i < allConditionTrackers.size(); i++) {
auto& conditionTracker = allConditionTrackers[i];
- if (!conditionTracker->init(conditionConfigs, allConditionTrackers, conditionTrackerMap,
- stackTracker, initialConditionCache)) {
- return false;
+ invalidConfigReason =
+ conditionTracker->init(conditionConfigs, allConditionTrackers, conditionTrackerMap,
+ stackTracker, initialConditionCache);
+ if (invalidConfigReason.has_value()) {
+ return invalidConfigReason;
}
for (const int trackerIndex : conditionTracker->getAtomMatchingTrackerIndex()) {
auto& conditionList = trackerToConditionMap[trackerIndex];
conditionList.push_back(i);
}
}
- return true;
+ return nullopt;
}
-bool initStates(const StatsdConfig& config, unordered_map<int64_t, int>& stateAtomIdMap,
- unordered_map<int64_t, unordered_map<int, int64_t>>& allStateGroupMaps,
- map<int64_t, uint64_t>& stateProtoHashes) {
+optional<InvalidConfigReason> initStates(
+ const StatsdConfig& config, unordered_map<int64_t, int>& stateAtomIdMap,
+ unordered_map<int64_t, unordered_map<int, int64_t>>& allStateGroupMaps,
+ map<int64_t, uint64_t>& stateProtoHashes) {
for (int i = 0; i < config.state_size(); i++) {
const State& state = config.state(i);
const int64_t stateId = state.id();
@@ -1112,7 +1405,8 @@
string serializedState;
if (!state.SerializeToString(&serializedState)) {
ALOGE("Unable to serialize state %lld", (long long)stateId);
- return false;
+ return createInvalidConfigReasonWithState(
+ INVALID_CONFIG_REASON_STATE_SERIALIZATION_FAILED, state.id(), state.atom_id());
}
stateProtoHashes[stateId] = Hash64(serializedState);
@@ -1124,31 +1418,39 @@
}
}
- return true;
+ return nullopt;
}
-bool initMetrics(const ConfigKey& key, const StatsdConfig& config, const int64_t timeBaseTimeNs,
- const int64_t currentTimeNs, const sp<StatsPullerManager>& pullerManager,
- const unordered_map<int64_t, int>& atomMatchingTrackerMap,
- const unordered_map<int64_t, int>& conditionTrackerMap,
- const vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
- const unordered_map<int64_t, int>& stateAtomIdMap,
- const unordered_map<int64_t, unordered_map<int, int64_t>>& allStateGroupMaps,
- vector<sp<ConditionTracker>>& allConditionTrackers,
- const vector<ConditionState>& initialConditionCache,
- vector<sp<MetricProducer>>& allMetricProducers,
- unordered_map<int, vector<int>>& conditionToMetricMap,
- unordered_map<int, vector<int>>& trackerToMetricMap,
- unordered_map<int64_t, int>& metricMap, std::set<int64_t>& noReportMetricIds,
- unordered_map<int, vector<int>>& activationAtomTrackerToMetricMap,
- unordered_map<int, vector<int>>& deactivationAtomTrackerToMetricMap,
- vector<int>& metricsWithActivation) {
+optional<InvalidConfigReason> initMetrics(
+ const ConfigKey& key, const StatsdConfig& config, const int64_t timeBaseTimeNs,
+ const int64_t currentTimeNs, const sp<StatsPullerManager>& pullerManager,
+ const unordered_map<int64_t, int>& atomMatchingTrackerMap,
+ const unordered_map<int64_t, int>& conditionTrackerMap,
+ const vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
+ const unordered_map<int64_t, int>& stateAtomIdMap,
+ const unordered_map<int64_t, unordered_map<int, int64_t>>& allStateGroupMaps,
+ vector<sp<ConditionTracker>>& allConditionTrackers,
+ const vector<ConditionState>& initialConditionCache,
+ vector<sp<MetricProducer>>& allMetricProducers,
+ unordered_map<int, vector<int>>& conditionToMetricMap,
+ unordered_map<int, vector<int>>& trackerToMetricMap, unordered_map<int64_t, int>& metricMap,
+ std::set<int64_t>& noReportMetricIds,
+ unordered_map<int, vector<int>>& activationAtomTrackerToMetricMap,
+ unordered_map<int, vector<int>>& deactivationAtomTrackerToMetricMap,
+ vector<int>& metricsWithActivation) {
sp<ConditionWizard> wizard = new ConditionWizard(allConditionTrackers);
sp<EventMatcherWizard> matcherWizard = new EventMatcherWizard(allAtomMatchingTrackers);
const int allMetricsCount = config.count_metric_size() + config.duration_metric_size() +
config.event_metric_size() + config.gauge_metric_size() +
config.value_metric_size() + config.kll_metric_size();
allMetricProducers.reserve(allMetricsCount);
+ optional<InvalidConfigReason> invalidConfigReason;
+
+ if (config.has_restricted_metrics_delegate_package_name() &&
+ allMetricsCount != config.event_metric_size()) {
+ ALOGE("Restricted metrics only support event metric");
+ return InvalidConfigReason(INVALID_CONFIG_REASON_RESTRICTED_METRIC_NOT_SUPPORTED);
+ }
// Construct map from metric id to metric activation index. The map will be used to determine
// the metric activation corresponding to a metric.
@@ -1158,7 +1460,8 @@
int64_t metricId = metricActivation.metric_id();
if (metricToActivationMap.find(metricId) != metricToActivationMap.end()) {
ALOGE("Metric %lld has multiple MetricActivations", (long long)metricId);
- return false;
+ return InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_HAS_MULTIPLE_ACTIVATIONS,
+ metricId);
}
metricToActivationMap.insert({metricId, i});
}
@@ -1175,9 +1478,9 @@
conditionTrackerMap, initialConditionCache, wizard, stateAtomIdMap,
allStateGroupMaps, metricToActivationMap, trackerToMetricMap, conditionToMetricMap,
activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
- metricsWithActivation);
+ metricsWithActivation, invalidConfigReason);
if (!producer) {
- return false;
+ return invalidConfigReason;
}
allMetricProducers.push_back(producer.value());
}
@@ -1194,9 +1497,9 @@
conditionTrackerMap, initialConditionCache, wizard, stateAtomIdMap,
allStateGroupMaps, metricToActivationMap, trackerToMetricMap, conditionToMetricMap,
activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
- metricsWithActivation);
+ metricsWithActivation, invalidConfigReason);
if (!producer) {
- return false;
+ return invalidConfigReason;
}
allMetricProducers.push_back(producer.value());
}
@@ -1211,9 +1514,9 @@
atomMatchingTrackerMap, allConditionTrackers, conditionTrackerMap,
initialConditionCache, wizard, metricToActivationMap, trackerToMetricMap,
conditionToMetricMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, metricsWithActivation);
+ deactivationAtomTrackerToMetricMap, metricsWithActivation, invalidConfigReason);
if (!producer) {
- return false;
+ return invalidConfigReason;
}
allMetricProducers.push_back(producer.value());
}
@@ -1229,9 +1532,9 @@
conditionTrackerMap, initialConditionCache, wizard, matcherWizard, stateAtomIdMap,
allStateGroupMaps, metricToActivationMap, trackerToMetricMap, conditionToMetricMap,
activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
- metricsWithActivation);
+ metricsWithActivation, invalidConfigReason);
if (!producer) {
- return false;
+ return invalidConfigReason;
}
allMetricProducers.push_back(producer.value());
}
@@ -1247,9 +1550,9 @@
conditionTrackerMap, initialConditionCache, wizard, matcherWizard, stateAtomIdMap,
allStateGroupMaps, metricToActivationMap, trackerToMetricMap, conditionToMetricMap,
activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
- metricsWithActivation);
+ metricsWithActivation, invalidConfigReason);
if (!producer) {
- return false;
+ return invalidConfigReason;
}
allMetricProducers.push_back(producer.value());
}
@@ -1265,9 +1568,9 @@
conditionTrackerMap, initialConditionCache, wizard, matcherWizard,
metricToActivationMap, trackerToMetricMap, conditionToMetricMap,
activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
- metricsWithActivation);
+ metricsWithActivation, invalidConfigReason);
if (!producer) {
- return false;
+ return invalidConfigReason;
}
allMetricProducers.push_back(producer.value());
}
@@ -1275,7 +1578,8 @@
const auto no_report_metric = config.no_report_metric(i);
if (metricMap.find(no_report_metric) == metricMap.end()) {
ALOGW("no_report_metric %" PRId64 " not exist", no_report_metric);
- return false;
+ return InvalidConfigReason(INVALID_CONFIG_REASON_NO_REPORT_METRIC_NOT_FOUND,
+ no_report_metric);
}
noReportMetricIds.insert(no_report_metric);
}
@@ -1290,40 +1594,41 @@
if (whitelistedAtomIds.find(atomId) == whitelistedAtomIds.end()) {
StateManager::getInstance().registerListener(atomId, it);
} else {
- return false;
+ return InvalidConfigReason(
+ INVALID_CONFIG_REASON_METRIC_SLICED_STATE_ATOM_ALLOWED_FROM_ANY_UID,
+ it->getMetricId());
}
}
}
- return true;
+ return nullopt;
}
-bool initAlerts(const StatsdConfig& config, const int64_t currentTimeNs,
- const unordered_map<int64_t, int>& metricProducerMap,
- unordered_map<int64_t, int>& alertTrackerMap,
- const sp<AlarmMonitor>& anomalyAlarmMonitor,
- vector<sp<MetricProducer>>& allMetricProducers,
- vector<sp<AnomalyTracker>>& allAnomalyTrackers) {
+optional<InvalidConfigReason> initAlerts(const StatsdConfig& config, const int64_t currentTimeNs,
+ const unordered_map<int64_t, int>& metricProducerMap,
+ unordered_map<int64_t, int>& alertTrackerMap,
+ const sp<AlarmMonitor>& anomalyAlarmMonitor,
+ vector<sp<MetricProducer>>& allMetricProducers,
+ vector<sp<AnomalyTracker>>& allAnomalyTrackers) {
+ optional<InvalidConfigReason> invalidConfigReason;
for (int i = 0; i < config.alert_size(); i++) {
const Alert& alert = config.alert(i);
alertTrackerMap.insert(std::make_pair(alert.id(), allAnomalyTrackers.size()));
- optional<sp<AnomalyTracker>> anomalyTracker =
- createAnomalyTracker(alert, anomalyAlarmMonitor, UpdateStatus::UPDATE_NEW,
- currentTimeNs, metricProducerMap, allMetricProducers);
+ optional<sp<AnomalyTracker>> anomalyTracker = createAnomalyTracker(
+ alert, anomalyAlarmMonitor, UpdateStatus::UPDATE_NEW, currentTimeNs,
+ metricProducerMap, allMetricProducers, invalidConfigReason);
if (!anomalyTracker) {
- return false;
+ return invalidConfigReason;
}
allAnomalyTrackers.push_back(anomalyTracker.value());
}
- if (!initSubscribersForSubscriptionType(config, Subscription::ALERT, alertTrackerMap,
- allAnomalyTrackers)) {
- return false;
- }
- return true;
+ return initSubscribersForSubscriptionType(config, Subscription::ALERT, alertTrackerMap,
+ allAnomalyTrackers);
}
-bool initAlarms(const StatsdConfig& config, const ConfigKey& key,
- const sp<AlarmMonitor>& periodicAlarmMonitor, const int64_t timeBaseNs,
- const int64_t currentTimeNs, vector<sp<AlarmTracker>>& allAlarmTrackers) {
+optional<InvalidConfigReason> initAlarms(const StatsdConfig& config, const ConfigKey& key,
+ const sp<AlarmMonitor>& periodicAlarmMonitor,
+ const int64_t timeBaseNs, const int64_t currentTimeNs,
+ vector<sp<AlarmTracker>>& allAlarmTrackers) {
unordered_map<int64_t, int> alarmTrackerMap;
int64_t startMillis = timeBaseNs / 1000 / 1000;
int64_t currentTimeMillis = currentTimeNs / 1000 / 1000;
@@ -1331,44 +1636,43 @@
const Alarm& alarm = config.alarm(i);
if (alarm.offset_millis() <= 0) {
ALOGW("Alarm offset_millis should be larger than 0.");
- return false;
+ return createInvalidConfigReasonWithAlarm(
+ INVALID_CONFIG_REASON_ALARM_OFFSET_LESS_THAN_OR_EQUAL_ZERO, alarm.id());
}
if (alarm.period_millis() <= 0) {
ALOGW("Alarm period_millis should be larger than 0.");
- return false;
+ return createInvalidConfigReasonWithAlarm(
+ INVALID_CONFIG_REASON_ALARM_PERIOD_LESS_THAN_OR_EQUAL_ZERO, alarm.id());
}
alarmTrackerMap.insert(std::make_pair(alarm.id(), allAlarmTrackers.size()));
allAlarmTrackers.push_back(
new AlarmTracker(startMillis, currentTimeMillis, alarm, key, periodicAlarmMonitor));
}
- if (!initSubscribersForSubscriptionType(config, Subscription::ALARM, alarmTrackerMap,
- allAlarmTrackers)) {
- return false;
- }
- return true;
+ return initSubscribersForSubscriptionType(config, Subscription::ALARM, alarmTrackerMap,
+ allAlarmTrackers);
}
-bool initStatsdConfig(const ConfigKey& key, const StatsdConfig& config, const sp<UidMap>& uidMap,
- const sp<StatsPullerManager>& pullerManager,
- const sp<AlarmMonitor>& anomalyAlarmMonitor,
- const sp<AlarmMonitor>& periodicAlarmMonitor, const int64_t timeBaseNs,
- const int64_t currentTimeNs, set<int>& allTagIds,
- vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
- unordered_map<int64_t, int>& atomMatchingTrackerMap,
- vector<sp<ConditionTracker>>& allConditionTrackers,
- unordered_map<int64_t, int>& conditionTrackerMap,
- vector<sp<MetricProducer>>& allMetricProducers,
- unordered_map<int64_t, int>& metricProducerMap,
- vector<sp<AnomalyTracker>>& allAnomalyTrackers,
- vector<sp<AlarmTracker>>& allPeriodicAlarmTrackers,
- unordered_map<int, std::vector<int>>& conditionToMetricMap,
- unordered_map<int, std::vector<int>>& trackerToMetricMap,
- unordered_map<int, std::vector<int>>& trackerToConditionMap,
- unordered_map<int, std::vector<int>>& activationAtomTrackerToMetricMap,
- unordered_map<int, std::vector<int>>& deactivationAtomTrackerToMetricMap,
- unordered_map<int64_t, int>& alertTrackerMap,
- vector<int>& metricsWithActivation, map<int64_t, uint64_t>& stateProtoHashes,
- set<int64_t>& noReportMetricIds) {
+optional<InvalidConfigReason> initStatsdConfig(
+ const ConfigKey& key, const StatsdConfig& config, const sp<UidMap>& uidMap,
+ const sp<StatsPullerManager>& pullerManager, const sp<AlarmMonitor>& anomalyAlarmMonitor,
+ const sp<AlarmMonitor>& periodicAlarmMonitor, const int64_t timeBaseNs,
+ const int64_t currentTimeNs,
+ std::unordered_map<int, std::vector<int>>& allTagIdsToMatchersMap,
+ vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
+ unordered_map<int64_t, int>& atomMatchingTrackerMap,
+ vector<sp<ConditionTracker>>& allConditionTrackers,
+ unordered_map<int64_t, int>& conditionTrackerMap,
+ vector<sp<MetricProducer>>& allMetricProducers,
+ unordered_map<int64_t, int>& metricProducerMap,
+ vector<sp<AnomalyTracker>>& allAnomalyTrackers,
+ vector<sp<AlarmTracker>>& allPeriodicAlarmTrackers,
+ unordered_map<int, std::vector<int>>& conditionToMetricMap,
+ unordered_map<int, std::vector<int>>& trackerToMetricMap,
+ unordered_map<int, std::vector<int>>& trackerToConditionMap,
+ unordered_map<int, std::vector<int>>& activationAtomTrackerToMetricMap,
+ unordered_map<int, std::vector<int>>& deactivationAtomTrackerToMetricMap,
+ unordered_map<int64_t, int>& alertTrackerMap, vector<int>& metricsWithActivation,
+ map<int64_t, uint64_t>& stateProtoHashes, set<int64_t>& noReportMetricIds) {
vector<ConditionState> initialConditionCache;
unordered_map<int64_t, int> stateAtomIdMap;
unordered_map<int64_t, unordered_map<int, int64_t>> allStateGroupMaps;
@@ -1376,47 +1680,59 @@
if (config.package_certificate_hash_size_bytes() > UINT8_MAX) {
ALOGE("Invalid value for package_certificate_hash_size_bytes: %d",
config.package_certificate_hash_size_bytes());
- return false;
+ return InvalidConfigReason(INVALID_CONFIG_REASON_PACKAGE_CERT_HASH_SIZE_TOO_LARGE);
}
- if (!initAtomMatchingTrackers(config, uidMap, atomMatchingTrackerMap, allAtomMatchingTrackers,
- allTagIds)) {
+ optional<InvalidConfigReason> invalidConfigReason =
+ initAtomMatchingTrackers(config, uidMap, atomMatchingTrackerMap,
+ allAtomMatchingTrackers, allTagIdsToMatchersMap);
+ if (invalidConfigReason.has_value()) {
ALOGE("initAtomMatchingTrackers failed");
- return false;
+ return invalidConfigReason;
}
VLOG("initAtomMatchingTrackers succeed...");
- if (!initConditions(key, config, atomMatchingTrackerMap, conditionTrackerMap,
- allConditionTrackers, trackerToConditionMap, initialConditionCache)) {
+ invalidConfigReason =
+ initConditions(key, config, atomMatchingTrackerMap, conditionTrackerMap,
+ allConditionTrackers, trackerToConditionMap, initialConditionCache);
+ if (invalidConfigReason.has_value()) {
ALOGE("initConditionTrackers failed");
- return false;
+ return invalidConfigReason;
}
- if (!initStates(config, stateAtomIdMap, allStateGroupMaps, stateProtoHashes)) {
+ invalidConfigReason = initStates(config, stateAtomIdMap, allStateGroupMaps, stateProtoHashes);
+ if (invalidConfigReason.has_value()) {
ALOGE("initStates failed");
- return false;
- }
- if (!initMetrics(key, config, timeBaseNs, currentTimeNs, pullerManager, atomMatchingTrackerMap,
- conditionTrackerMap, allAtomMatchingTrackers, stateAtomIdMap,
- allStateGroupMaps, allConditionTrackers, initialConditionCache,
- allMetricProducers, conditionToMetricMap, trackerToMetricMap,
- metricProducerMap, noReportMetricIds, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, metricsWithActivation)) {
- ALOGE("initMetricProducers failed");
- return false;
- }
- if (!initAlerts(config, currentTimeNs, metricProducerMap, alertTrackerMap, anomalyAlarmMonitor,
- allMetricProducers, allAnomalyTrackers)) {
- ALOGE("initAlerts failed");
- return false;
- }
- if (!initAlarms(config, key, periodicAlarmMonitor, timeBaseNs, currentTimeNs,
- allPeriodicAlarmTrackers)) {
- ALOGE("initAlarms failed");
- return false;
+ return invalidConfigReason;
}
- return true;
+ invalidConfigReason = initMetrics(
+ key, config, timeBaseNs, currentTimeNs, pullerManager, atomMatchingTrackerMap,
+ conditionTrackerMap, allAtomMatchingTrackers, stateAtomIdMap, allStateGroupMaps,
+ allConditionTrackers, initialConditionCache, allMetricProducers, conditionToMetricMap,
+ trackerToMetricMap, metricProducerMap, noReportMetricIds,
+ activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
+ metricsWithActivation);
+ if (invalidConfigReason.has_value()) {
+ ALOGE("initMetricProducers failed");
+ return invalidConfigReason;
+ }
+
+ invalidConfigReason = initAlerts(config, currentTimeNs, metricProducerMap, alertTrackerMap,
+ anomalyAlarmMonitor, allMetricProducers, allAnomalyTrackers);
+ if (invalidConfigReason.has_value()) {
+ ALOGE("initAlerts failed");
+ return invalidConfigReason;
+ }
+
+ invalidConfigReason = initAlarms(config, key, periodicAlarmMonitor, timeBaseNs, currentTimeNs,
+ allPeriodicAlarmTrackers);
+ if (invalidConfigReason.has_value()) {
+ ALOGE("initAlarms failed");
+ return invalidConfigReason;
+ }
+
+ return nullopt;
}
} // namespace statsd
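
For reference, a minimal standalone sketch of the vector-per-atom-id deduplication used in initAtomMatchingTrackers above (see the performance note there). The map, atom ids, and matcher indices here are placeholders, not the statsd types:

#include <algorithm>
#include <iostream>
#include <unordered_map>
#include <utility>
#include <vector>

int main() {
    // Placeholder for allTagIdsToMatchersMap: atom/tag id -> matcher indices.
    std::unordered_map<int, std::vector<int>> tagIdsToMatchersMap;

    // Matchers 0 and 1 both match atom 42; matcher 1 also matches atom 43.
    // The duplicate {42, 1} entry exercises the dedup path.
    const std::vector<std::pair<int, int>> interests = {{42, 0}, {42, 1}, {43, 1}, {42, 1}};
    for (const auto& [atomId, matcherIndex] : interests) {
        auto& matchers = tagIdsToMatchersMap[atomId];
        // A linear find keeps the per-atom vector small and duplicate-free; for fewer
        // than ~10 entries this beats a set lookup and uses less memory.
        if (std::find(matchers.begin(), matchers.end(), matcherIndex) == matchers.end()) {
            matchers.push_back(matcherIndex);
        }
    }

    for (const auto& [atomId, matchers] : tagIdsToMatchersMap) {
        std::cout << "atom " << atomId << " -> " << matchers.size() << " matcher(s)\n";
    }
    return 0;
}
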
diff --git a/statsd/src/metrics/parsing_utils/metrics_manager_util.h b/statsd/src/metrics/parsing_utils/metrics_manager_util.h
index 47306d4..c4e231a 100644
--- a/statsd/src/metrics/parsing_utils/metrics_manager_util.h
+++ b/statsd/src/metrics/parsing_utils/metrics_manager_util.h
@@ -37,51 +37,54 @@
// input:
// [logMatcher]: the input AtomMatcher from the StatsdConfig
// [index]: the index of the matcher
+// [invalidConfigReason]: set to the reason and ids to log if the config is invalid
// output:
// new AtomMatchingTracker, or null if the tracker is unable to be created
-sp<AtomMatchingTracker> createAtomMatchingTracker(const AtomMatcher& logMatcher, const int index,
- const sp<UidMap>& uidMap);
+sp<AtomMatchingTracker> createAtomMatchingTracker(
+ const AtomMatcher& logMatcher, const int index, const sp<UidMap>& uidMap,
+ optional<InvalidConfigReason>& invalidConfigReason);
// Create a ConditionTracker.
// input:
// [predicate]: the input Predicate from the StatsdConfig
// [index]: the index of the condition tracker
// [atomMatchingTrackerMap]: map of atom matcher id to its index in allAtomMatchingTrackers
+// [invalidConfigReason]: set to the reason and ids to log if the config is invalid
// output:
// new ConditionTracker, or null if the tracker is unable to be created
sp<ConditionTracker> createConditionTracker(
const ConfigKey& key, const Predicate& predicate, const int index,
- const unordered_map<int64_t, int>& atomMatchingTrackerMap);
+ const unordered_map<int64_t, int>& atomMatchingTrackerMap,
+ optional<InvalidConfigReason>& invalidConfigReason);
// Get the hash of a metric, combining the activation if the metric has one.
-bool getMetricProtoHash(const StatsdConfig& config, const google::protobuf::MessageLite& metric,
- const int64_t id,
- const std::unordered_map<int64_t, int>& metricToActivationMap,
- uint64_t& metricHash);
+optional<InvalidConfigReason> getMetricProtoHash(
+ const StatsdConfig& config, const google::protobuf::MessageLite& metric, const int64_t id,
+ const std::unordered_map<int64_t, int>& metricToActivationMap, uint64_t& metricHash);
// 1. Validates matcher existence
// 2. Enforces matchers with dimensions and those used for trigger_event are about one atom
// 3. Gets matcher index and updates tracker to metric map
-bool handleMetricWithAtomMatchingTrackers(
- const int64_t matcherId, const int metricIndex, const bool enforceOneAtom,
+optional<InvalidConfigReason> handleMetricWithAtomMatchingTrackers(
+ const int64_t matcherId, const int64_t metricId, const int metricIndex,
+ const bool enforceOneAtom,
const std::vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
const std::unordered_map<int64_t, int>& atomMatchingTrackerMap,
std::unordered_map<int, std::vector<int>>& trackerToMetricMap, int& logTrackerIndex);
// 1. Validates condition existence, including those in links
// 2. Gets condition index and updates condition to metric map
-bool handleMetricWithConditions(
- const int64_t condition, const int metricIndex,
+optional<InvalidConfigReason> handleMetricWithConditions(
+ const int64_t condition, const int64_t metricId, const int metricIndex,
const std::unordered_map<int64_t, int>& conditionTrackerMap,
- const ::google::protobuf::RepeatedPtrField<::android::os::statsd::MetricConditionLink>&
- links,
+ const ::google::protobuf::RepeatedPtrField<MetricConditionLink>& links,
const std::vector<sp<ConditionTracker>>& allConditionTrackers, int& conditionIndex,
std::unordered_map<int, std::vector<int>>& conditionToMetricMap);
// Validates a metricActivation and populates state.
// Fills the new event activation/deactivation maps, preserving the existing activations.
-// Returns false if there are errors.
-bool handleMetricActivationOnConfigUpdate(
+// Returns nullopt if successful and InvalidConfigReason if not.
+optional<InvalidConfigReason> handleMetricActivationOnConfigUpdate(
const StatsdConfig& config, const int64_t metricId, const int metricIndex,
const std::unordered_map<int64_t, int>& metricToActivationMap,
const std::unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
@@ -110,7 +113,8 @@
std::unordered_map<int, std::vector<int>>& conditionToMetricMap,
std::unordered_map<int, std::vector<int>>& activationAtomTrackerToMetricMap,
std::unordered_map<int, std::vector<int>>& deactivationAtomTrackerToMetricMap,
- std::vector<int>& metricsWithActivation);
+ std::vector<int>& metricsWithActivation,
+ optional<InvalidConfigReason>& invalidConfigReason);
// Creates a DurationMetricProducer and updates the vectors/maps used by MetricsManager with
// the appropriate indices. Returns an sp to the producer, or nullopt if there was an error.
@@ -129,7 +133,8 @@
std::unordered_map<int, std::vector<int>>& conditionToMetricMap,
std::unordered_map<int, std::vector<int>>& activationAtomTrackerToMetricMap,
std::unordered_map<int, std::vector<int>>& deactivationAtomTrackerToMetricMap,
- std::vector<int>& metricsWithActivation);
+ std::vector<int>& metricsWithActivation,
+ optional<InvalidConfigReason>& invalidConfigReason);
// Creates an EventMetricProducer and updates the vectors/maps used by MetricsManager with
// the appropriate indices. Returns an sp to the producer, or nullopt if there was an error.
@@ -146,7 +151,8 @@
std::unordered_map<int, std::vector<int>>& conditionToMetricMap,
std::unordered_map<int, std::vector<int>>& activationAtomTrackerToMetricMap,
std::unordered_map<int, std::vector<int>>& deactivationAtomTrackerToMetricMap,
- std::vector<int>& metricsWithActivation);
+ std::vector<int>& metricsWithActivation,
+ optional<InvalidConfigReason>& invalidConfigReason);
// Creates a NumericValueMetricProducer and updates the vectors/maps used by MetricsManager with
// the appropriate indices. Returns an sp to the producer, or nullopt if there was an error.
@@ -167,7 +173,8 @@
std::unordered_map<int, std::vector<int>>& conditionToMetricMap,
std::unordered_map<int, std::vector<int>>& activationAtomTrackerToMetricMap,
std::unordered_map<int, std::vector<int>>& deactivationAtomTrackerToMetricMap,
- std::vector<int>& metricsWithActivation);
+ std::vector<int>& metricsWithActivation,
+ optional<InvalidConfigReason>& invalidConfigReason);
// Creates a GaugeMetricProducer and updates the vectors/maps used by MetricsManager with
// the appropriate indices. Returns an sp to the producer, or nullopt if there was an error.
@@ -186,7 +193,8 @@
std::unordered_map<int, std::vector<int>>& conditionToMetricMap,
std::unordered_map<int, std::vector<int>>& activationAtomTrackerToMetricMap,
std::unordered_map<int, std::vector<int>>& deactivationAtomTrackerToMetricMap,
- std::vector<int>& metricsWithActivation);
+ std::vector<int>& metricsWithActivation,
+ optional<InvalidConfigReason>& invalidConfigReason);
// Creates a KllMetricProducer and updates the vectors/maps used by MetricsManager with
// the appropriate indices. Returns an sp to the producer, or nullopt if there was an error.
@@ -207,7 +215,7 @@
unordered_map<int, vector<int>>& conditionToMetricMap,
unordered_map<int, vector<int>>& activationAtomTrackerToMetricMap,
unordered_map<int, vector<int>>& deactivationAtomTrackerToMetricMap,
- vector<int>& metricsWithActivation);
+ vector<int>& metricsWithActivation, optional<InvalidConfigReason>& invalidConfigReason);
// Creates an AnomalyTracker and adds it to the appropriate metric.
// Returns an sp to the AnomalyTracker, or nullopt if there was an error.
@@ -215,14 +223,15 @@
const Alert& alert, const sp<AlarmMonitor>& anomalyAlarmMonitor,
const UpdateStatus& updateStatus, const int64_t currentTimeNs,
const std::unordered_map<int64_t, int>& metricProducerMap,
- std::vector<sp<MetricProducer>>& allMetricProducers);
+ std::vector<sp<MetricProducer>>& allMetricProducers,
+ optional<InvalidConfigReason>& invalidConfigReason);
-// Templated function for adding subscriptions to alarms or alerts. Returns true if successful.
+// Templated function for adding subscriptions to alarms or alerts. Returns nullopt if successful
+// and InvalidConfigReason if not.
template <typename T>
-bool initSubscribersForSubscriptionType(const StatsdConfig& config,
- const Subscription_RuleType ruleType,
- const std::unordered_map<int64_t, int>& ruleMap,
- std::vector<T>& allRules) {
+optional<InvalidConfigReason> initSubscribersForSubscriptionType(
+ const StatsdConfig& config, const Subscription_RuleType ruleType,
+ const std::unordered_map<int64_t, int>& ruleMap, std::vector<T>& allRules) {
for (int i = 0; i < config.subscription_size(); ++i) {
const Subscription& subscription = config.subscription(i);
if (subscription.rule_type() != ruleType) {
@@ -231,18 +240,31 @@
if (subscription.subscriber_information_case() ==
Subscription::SubscriberInformationCase::SUBSCRIBER_INFORMATION_NOT_SET) {
ALOGW("subscription \"%lld\" has no subscriber info.\"", (long long)subscription.id());
- return false;
+ return createInvalidConfigReasonWithSubscription(
+ INVALID_CONFIG_REASON_SUBSCRIPTION_SUBSCRIBER_INFO_MISSING, subscription.id());
}
const auto& itr = ruleMap.find(subscription.rule_id());
if (itr == ruleMap.end()) {
ALOGW("subscription \"%lld\" has unknown rule id: \"%lld\"",
(long long)subscription.id(), (long long)subscription.rule_id());
- return false;
+ switch (subscription.rule_type()) {
+ case Subscription::ALARM:
+ return createInvalidConfigReasonWithSubscriptionAndAlarm(
+ INVALID_CONFIG_REASON_SUBSCRIPTION_RULE_NOT_FOUND, subscription.id(),
+ subscription.rule_id());
+ case Subscription::ALERT:
+ return createInvalidConfigReasonWithSubscriptionAndAlert(
+ INVALID_CONFIG_REASON_SUBSCRIPTION_RULE_NOT_FOUND, subscription.id(),
+ subscription.rule_id());
+ case Subscription::RULE_TYPE_UNSPECIFIED:
+ return createInvalidConfigReasonWithSubscription(
+ INVALID_CONFIG_REASON_SUBSCRIPTION_RULE_NOT_FOUND, subscription.id());
+ }
}
const int ruleIndex = itr->second;
allRules[ruleIndex]->addSubscription(subscription);
}
- return true;
+ return nullopt;
}
// Helper functions for MetricsManager to initialize from StatsdConfig.
@@ -258,11 +280,13 @@
// output:
// [atomMatchingTrackerMap]: this map should contain matcher name to index mapping
// [allAtomMatchingTrackers]: should store the sp to all the AtomMatchingTracker
-// [allTagIds]: contains the set of all interesting tag ids to this config.
-bool initAtomMatchingTrackers(const StatsdConfig& config, const sp<UidMap>& uidMap,
- std::unordered_map<int64_t, int>& atomMatchingTrackerMap,
- std::vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
- std::set<int>& allTagIds);
+// [allTagIdsToMatchersMap]: map from tag ids to the indices of the atom matchers interested in them
+// Returns nullopt if successful and InvalidConfigReason if not.
+optional<InvalidConfigReason> initAtomMatchingTrackers(
+ const StatsdConfig& config, const sp<UidMap>& uidMap,
+ std::unordered_map<int64_t, int>& atomMatchingTrackerMap,
+ std::vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
+ std::unordered_map<int, std::vector<int>>& allTagIdsToMatchersMap);
// Initialize ConditionTrackers
// input:
@@ -275,12 +299,14 @@
// [trackerToConditionMap]: contain the mapping from index of
// log tracker to condition trackers that use the log tracker
// [initialConditionCache]: stores the initial conditions for each ConditionTracker
-bool initConditions(const ConfigKey& key, const StatsdConfig& config,
- const std::unordered_map<int64_t, int>& atomMatchingTrackerMap,
- std::unordered_map<int64_t, int>& conditionTrackerMap,
- std::vector<sp<ConditionTracker>>& allConditionTrackers,
- std::unordered_map<int, std::vector<int>>& trackerToConditionMap,
- std::vector<ConditionState>& initialConditionCache);
+// Returns nullopt if successful and InvalidConfigReason if not.
+optional<InvalidConfigReason> initConditions(
+ const ConfigKey& key, const StatsdConfig& config,
+ const std::unordered_map<int64_t, int>& atomMatchingTrackerMap,
+ std::unordered_map<int64_t, int>& conditionTrackerMap,
+ std::vector<sp<ConditionTracker>>& allConditionTrackers,
+ std::unordered_map<int, std::vector<int>>& trackerToConditionMap,
+ std::vector<ConditionState>& initialConditionCache);
// Initialize State maps using State protos in the config. These maps will
// eventually be passed to MetricProducers to initialize their state info.
@@ -291,9 +317,11 @@
// [allStateGroupMaps]: this map should contain the mapping from states ids and state
// values to state group ids for all states
// [stateProtoHashes]: contains a map of state id to the hash of the State proto from the config
-bool initStates(const StatsdConfig& config, unordered_map<int64_t, int>& stateAtomIdMap,
- unordered_map<int64_t, unordered_map<int, int64_t>>& allStateGroupMaps,
- std::map<int64_t, uint64_t>& stateProtoHashes);
+// Returns nullopt if successful and InvalidConfigReason if not.
+optional<InvalidConfigReason> initStates(
+ const StatsdConfig& config, unordered_map<int64_t, int>& stateAtomIdMap,
+ unordered_map<int64_t, unordered_map<int, int64_t>>& allStateGroupMaps,
+ std::map<int64_t, uint64_t>& stateProtoHashes);
// Initialize MetricProducers.
// input:
@@ -310,7 +338,8 @@
// [conditionToMetricMap]: contains the mapping from condition tracker index to
// the list of MetricProducer index
// [trackerToMetricMap]: contains the mapping from log tracker to MetricProducer index.
-bool initMetrics(
+// Returns nullopt if successful and InvalidConfigReason if not.
+optional<InvalidConfigReason> initMetrics(
const ConfigKey& key, const StatsdConfig& config, const int64_t timeBaseTimeNs,
const int64_t currentTimeNs, const sp<StatsPullerManager>& pullerManager,
const std::unordered_map<int64_t, int>& atomMatchingTrackerMap,
@@ -330,34 +359,34 @@
// Initialize alarms
// Is called both on initialize new configs and config updates since alarms do not have any state.
-bool initAlarms(const StatsdConfig& config, const ConfigKey& key,
- const sp<AlarmMonitor>& periodicAlarmMonitor, const int64_t timeBaseNs,
- const int64_t currentTimeNs, std::vector<sp<AlarmTracker>>& allAlarmTrackers);
+optional<InvalidConfigReason> initAlarms(const StatsdConfig& config, const ConfigKey& key,
+ const sp<AlarmMonitor>& periodicAlarmMonitor,
+ const int64_t timeBaseNs, const int64_t currentTimeNs,
+ std::vector<sp<AlarmTracker>>& allAlarmTrackers);
// Initialize MetricsManager from StatsdConfig.
// Parameters are the members of MetricsManager. See MetricsManager for declaration.
-bool initStatsdConfig(const ConfigKey& key, const StatsdConfig& config, const sp<UidMap>& uidMap,
- const sp<StatsPullerManager>& pullerManager,
- const sp<AlarmMonitor>& anomalyAlarmMonitor,
- const sp<AlarmMonitor>& periodicAlarmMonitor, const int64_t timeBaseNs,
- const int64_t currentTimeNs, std::set<int>& allTagIds,
- std::vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
- std::unordered_map<int64_t, int>& atomMatchingTrackerMap,
- std::vector<sp<ConditionTracker>>& allConditionTrackers,
- std::unordered_map<int64_t, int>& conditionTrackerMap,
- std::vector<sp<MetricProducer>>& allMetricProducers,
- std::unordered_map<int64_t, int>& metricProducerMap,
- vector<sp<AnomalyTracker>>& allAnomalyTrackers,
- vector<sp<AlarmTracker>>& allPeriodicAlarmTrackers,
- std::unordered_map<int, std::vector<int>>& conditionToMetricMap,
- std::unordered_map<int, std::vector<int>>& trackerToMetricMap,
- std::unordered_map<int, std::vector<int>>& trackerToConditionMap,
- std::unordered_map<int, std::vector<int>>& activationAtomTrackerToMetricMap,
- std::unordered_map<int, std::vector<int>>& deactivationAtomTrackerToMetricMap,
- std::unordered_map<int64_t, int>& alertTrackerMap,
- std::vector<int>& metricsWithActivation,
- std::map<int64_t, uint64_t>& stateProtoHashes,
- std::set<int64_t>& noReportMetricIds);
+optional<InvalidConfigReason> initStatsdConfig(
+ const ConfigKey& key, const StatsdConfig& config, const sp<UidMap>& uidMap,
+ const sp<StatsPullerManager>& pullerManager, const sp<AlarmMonitor>& anomalyAlarmMonitor,
+ const sp<AlarmMonitor>& periodicAlarmMonitor, const int64_t timeBaseNs,
+ const int64_t currentTimeNs,
+ std::unordered_map<int, std::vector<int>>& allTagIdsToMatchersMap,
+ std::vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
+ std::unordered_map<int64_t, int>& atomMatchingTrackerMap,
+ std::vector<sp<ConditionTracker>>& allConditionTrackers,
+ std::unordered_map<int64_t, int>& conditionTrackerMap,
+ std::vector<sp<MetricProducer>>& allMetricProducers,
+ std::unordered_map<int64_t, int>& metricProducerMap,
+ vector<sp<AnomalyTracker>>& allAnomalyTrackers,
+ vector<sp<AlarmTracker>>& allPeriodicAlarmTrackers,
+ std::unordered_map<int, std::vector<int>>& conditionToMetricMap,
+ std::unordered_map<int, std::vector<int>>& trackerToMetricMap,
+ std::unordered_map<int, std::vector<int>>& trackerToConditionMap,
+ std::unordered_map<int, std::vector<int>>& activationAtomTrackerToMetricMap,
+ std::unordered_map<int, std::vector<int>>& deactivationAtomTrackerToMetricMap,
+ std::unordered_map<int64_t, int>& alertTrackerMap, std::vector<int>& metricsWithActivation,
+ std::map<int64_t, uint64_t>& stateProtoHashes, std::set<int64_t>& noReportMetricIds);
} // namespace statsd
} // namespace os
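
The declarations above convert the init helpers from bool returns to optional<InvalidConfigReason>, where nullopt means success and a populated value is propagated unchanged to the caller. A minimal sketch of that calling convention, using a simplified stand-in rather than the real InvalidConfigReason type:

#include <cstdint>
#include <iostream>
#include <optional>

// Simplified stand-in for the real InvalidConfigReason (illustrative only).
struct FakeInvalidConfigReason {
    int reason;        // would be an invalid-config reason enum in statsd
    int64_t metricId;  // id recorded for logging/debugging
};

std::optional<FakeInvalidConfigReason> initStep(bool ok, int64_t id) {
    if (!ok) {
        return FakeInvalidConfigReason{/*reason=*/1, id};
    }
    return std::nullopt;  // nullopt signals success
}

std::optional<FakeInvalidConfigReason> initConfig() {
    // Each step either succeeds (nullopt) or returns a populated reason that the
    // caller propagates unchanged, mirroring initStatsdConfig above.
    if (auto reason = initStep(true, 100); reason.has_value()) return reason;
    if (auto reason = initStep(false, 200); reason.has_value()) return reason;
    return std::nullopt;
}

int main() {
    const auto reason = initConfig();
    if (reason.has_value()) {
        std::cout << "invalid config: reason " << reason->reason << ", metric "
                  << reason->metricId << '\n';
    } else {
        std::cout << "config ok\n";
    }
    return 0;
}
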
diff --git a/statsd/src/packages/UidMap.cpp b/statsd/src/packages/UidMap.cpp
index 6d5be2e..a483247 100644
--- a/statsd/src/packages/UidMap.cpp
+++ b/statsd/src/packages/UidMap.cpp
@@ -71,7 +71,7 @@
const int FIELD_ID_CHANGE_NEW_VERSION_STRING_HASH = 10;
const int FIELD_ID_CHANGE_PREV_VERSION_STRING_HASH = 11;
-UidMap::UidMap() : mBytesUsed(0), mIncludeCertificateHash(false) {
+UidMap::UidMap() : mBytesUsed(0) {
}
UidMap::~UidMap() {}
@@ -351,7 +351,7 @@
FIELD_ID_SNAPSHOT_PACKAGE_INFO);
// Get installer index.
int installerIndex = -1;
- if (includeInstaller && mIncludeCertificateHash && installerIndices != nullptr) {
+ if (includeInstaller && installerIndices != nullptr) {
const auto& it = installerIndices->find(appData.installer);
if (it == installerIndices->end()) {
// We have not encountered this installer yet; add it to installerIndices.
@@ -400,17 +400,13 @@
}
}
- if (mIncludeCertificateHash) {
- const size_t dumpHashSize =
- truncatedCertificateHashSize <= appData.certificateHash.size()
- ? truncatedCertificateHashSize
- : appData.certificateHash.size();
- if (dumpHashSize > 0) {
- proto->write(
- FIELD_TYPE_BYTES | FIELD_ID_SNAPSHOT_PACKAGE_TRUNCATED_CERTIFICATE_HASH,
- reinterpret_cast<const char*>(appData.certificateHash.data()),
- dumpHashSize);
- }
+ const size_t dumpHashSize = truncatedCertificateHashSize <= appData.certificateHash.size()
+ ? truncatedCertificateHashSize
+ : appData.certificateHash.size();
+ if (dumpHashSize > 0) {
+ proto->write(FIELD_TYPE_BYTES | FIELD_ID_SNAPSHOT_PACKAGE_TRUNCATED_CERTIFICATE_HASH,
+ reinterpret_cast<const char*>(appData.certificateHash.data()),
+ dumpHashSize);
}
proto->write(FIELD_TYPE_INT64 | FIELD_ID_SNAPSHOT_PACKAGE_VERSION,
@@ -481,7 +477,7 @@
installers[index] = installer;
}
- if (includeInstaller && mIncludeCertificateHash) {
+ if (includeInstaller) {
// Write installer list; either strings or hashes.
for (const string& installerName : installers) {
if (str_set == nullptr) { // Strings not hashed
@@ -554,11 +550,6 @@
return results;
}
-void UidMap::setIncludeCertificateHash(const bool include) {
- lock_guard<mutex> lock(mMutex);
- mIncludeCertificateHash = include;
-}
-
// Note not all the following AIDs are used as uids. Some are used only for gids.
// It's ok to leave them in the map, but we won't ever see them in the log's uid field.
// App's uid starts from 10000, and will not overlap with the following AIDs.
@@ -646,6 +637,16 @@
{"AID_CONTEXT_HUB", 1080},
{"AID_VIRTUALIZATIONSERVICE", 1081},
{"AID_ARTD", 1082},
+ {"AID_UWB", 1083},
+ {"AID_THREAD_NETWORK", 1084},
+ {"AID_DICED", 1085},
+ {"AID_DMESGD", 1086},
+ {"AID_JC_WEAVER", 1087},
+ {"AID_JC_STRONGBOX", 1088},
+ {"AID_JC_IDENTITYCRED", 1089},
+ {"AID_SDK_SANDBOX", 1090},
+ {"AID_SECURITY_LOG_WRITER", 1091},
+ {"AID_PRNG_SEEDER", 1092},
{"AID_SHELL", 2000},
{"AID_CACHE", 2001},
{"AID_DIAG", 2002},
diff --git a/statsd/src/packages/UidMap.h b/statsd/src/packages/UidMap.h
index e5cd224..43f9967 100644
--- a/statsd/src/packages/UidMap.h
+++ b/statsd/src/packages/UidMap.h
@@ -169,8 +169,6 @@
std::map<string, int>* installerIndices, std::set<string>* str_set,
ProtoOutputStream* proto) const;
- void setIncludeCertificateHash(const bool include);
-
private:
std::set<string> getAppNamesFromUidLocked(const int32_t& uid, bool returnNormalized) const;
string normalizeAppName(const string& appName) const;
@@ -227,9 +225,10 @@
// Cache the size of mOutput;
size_t mBytesUsed;
- bool mIncludeCertificateHash;
-
// Allows unit-test to access private methods.
+ FRIEND_TEST(RestrictedEventMetricE2eTest, TestRestrictedConfigUpdateDoesNotUpdateUidMap);
+ FRIEND_TEST(RestrictedEventMetricE2eTest,
+ TestRestrictedConfigUpdateAddsDelegateRemovesUidMapEntry);
FRIEND_TEST(UidMapTest, TestClearingOutput);
FRIEND_TEST(UidMapTest, TestRemovedAppRetained);
FRIEND_TEST(UidMapTest, TestRemovedAppOverGuardrail);
diff --git a/statsd/src/shell/ShellSubscriber.cpp b/statsd/src/shell/ShellSubscriber.cpp
index 35e64e4..3b0c606 100644
--- a/statsd/src/shell/ShellSubscriber.cpp
+++ b/statsd/src/shell/ShellSubscriber.cpp
@@ -19,225 +19,187 @@
#include "ShellSubscriber.h"
#include <android-base/file.h>
+#include <inttypes.h>
+#include <utils/Timers.h>
-#include "matchers/matcher_util.h"
#include "stats_log_util.h"
-using android::util::ProtoOutputStream;
+using aidl::android::os::IStatsSubscriptionCallback;
namespace android {
namespace os {
namespace statsd {
-const static int FIELD_ID_ATOM = 1;
-
-void ShellSubscriber::startNewSubscription(int in, int out, int timeoutSec) {
- int myToken = claimToken();
- VLOG("ShellSubscriber: new subscription %d has come in", myToken);
- mSubscriptionShouldEnd.notify_one();
-
- shared_ptr<SubscriptionInfo> mySubscriptionInfo = make_shared<SubscriptionInfo>(in, out);
- if (!readConfig(mySubscriptionInfo)) return;
-
+ShellSubscriber::~ShellSubscriber() {
{
std::unique_lock<std::mutex> lock(mMutex);
- mSubscriptionInfo = mySubscriptionInfo;
- spawnHelperThread(myToken);
- waitForSubscriptionToEndLocked(mySubscriptionInfo, myToken, lock, timeoutSec);
-
- if (mSubscriptionInfo == mySubscriptionInfo) {
- mSubscriptionInfo = nullptr;
- }
-
+ mClientSet.clear();
+ updateLogEventFilterLocked();
+ }
+ mThreadSleepCV.notify_one();
+ if (mThread.joinable()) {
+ mThread.join();
}
}
-void ShellSubscriber::spawnHelperThread(int myToken) {
- std::thread t([this, myToken] { pullAndSendHeartbeats(myToken); });
- t.detach();
-}
-
-void ShellSubscriber::waitForSubscriptionToEndLocked(shared_ptr<SubscriptionInfo> myInfo,
- int myToken,
- std::unique_lock<std::mutex>& lock,
- int timeoutSec) {
- if (timeoutSec > 0) {
- mSubscriptionShouldEnd.wait_for(lock, timeoutSec * 1s, [this, myToken, &myInfo] {
- return mToken != myToken || !myInfo->mClientAlive;
- });
- } else {
- mSubscriptionShouldEnd.wait(lock, [this, myToken, &myInfo] {
- return mToken != myToken || !myInfo->mClientAlive;
- });
- }
-}
-
-// Atomically claim the next token. Token numbers denote subscriber ordering.
-int ShellSubscriber::claimToken() {
+bool ShellSubscriber::startNewSubscription(int in, int out, int64_t timeoutSec) {
std::unique_lock<std::mutex> lock(mMutex);
- int myToken = ++mToken;
- return myToken;
+ VLOG("ShellSubscriber: new subscription has come in");
+ if (mClientSet.size() >= kMaxSubscriptions) {
+ ALOGE("ShellSubscriber: cannot have another active subscription. Current Subscriptions: "
+ "%zu. Limit: %zu",
+ mClientSet.size(), kMaxSubscriptions);
+ return false;
+ }
+
+ return startNewSubscriptionLocked(ShellSubscriberClient::create(
+ in, out, timeoutSec, getElapsedRealtimeSec(), mUidMap, mPullerMgr));
}
-// Read and parse single config. There should only one config per input.
-bool ShellSubscriber::readConfig(shared_ptr<SubscriptionInfo> subscriptionInfo) {
- // Read the size of the config.
- size_t bufferSize;
- if (!android::base::ReadFully(subscriptionInfo->mInputFd, &bufferSize, sizeof(bufferSize))) {
+bool ShellSubscriber::startNewSubscription(const vector<uint8_t>& subscriptionConfig,
+ const shared_ptr<IStatsSubscriptionCallback>& callback) {
+ std::unique_lock<std::mutex> lock(mMutex);
+ VLOG("ShellSubscriber: new subscription has come in");
+ if (mClientSet.size() >= kMaxSubscriptions) {
+ ALOGE("ShellSubscriber: cannot have another active subscription. Current Subscriptions: "
+ "%zu. Limit: %zu",
+ mClientSet.size(), kMaxSubscriptions);
return false;
}
- // Read the config.
- vector<uint8_t> buffer(bufferSize);
- if (!android::base::ReadFully(subscriptionInfo->mInputFd, buffer.data(), bufferSize)) {
- return false;
- }
+ return startNewSubscriptionLocked(ShellSubscriberClient::create(
+ subscriptionConfig, callback, getElapsedRealtimeSec(), mUidMap, mPullerMgr));
+}
- // Parse the config.
- ShellSubscription config;
- if (!config.ParseFromArray(buffer.data(), bufferSize)) {
- return false;
- }
+bool ShellSubscriber::startNewSubscriptionLocked(unique_ptr<ShellSubscriberClient> client) {
+ if (client == nullptr) return false;
- // Update SubscriptionInfo with state from config
- for (const auto& pushed : config.pushed()) {
- subscriptionInfo->mPushedMatchers.push_back(pushed);
- }
+ // Add new valid client to the client set
+ mClientSet.insert(std::move(client));
+ updateLogEventFilterLocked();
- for (const auto& pulled : config.pulled()) {
- vector<string> packages;
- vector<int32_t> uids;
- for (const string& pkg : pulled.packages()) {
- auto it = UidMap::sAidToUidMapping.find(pkg);
- if (it != UidMap::sAidToUidMapping.end()) {
- uids.push_back(it->second);
- } else {
- packages.push_back(pkg);
- }
+ // Only spawn one thread to manage pulling atoms and sending
+ // heartbeats.
+ if (!mThreadAlive) {
+ mThreadAlive = true;
+ if (mThread.joinable()) {
+ mThread.join();
}
-
- subscriptionInfo->mPulledInfo.emplace_back(pulled.matcher(), pulled.freq_millis(), packages,
- uids);
- VLOG("adding matcher for pulled atom %d", pulled.matcher().atom_id());
+ mThread = thread([this] { pullAndSendHeartbeats(); });
}
return true;
}
-void ShellSubscriber::pullAndSendHeartbeats(int myToken) {
- VLOG("ShellSubscriber: helper thread %d starting", myToken);
+// Sends heartbeat signals and sleeps between doing work
+void ShellSubscriber::pullAndSendHeartbeats() {
+ VLOG("ShellSubscriber: helper thread starting");
+ std::unique_lock<std::mutex> lock(mMutex);
while (true) {
- int64_t sleepTimeMs = INT_MAX;
- {
- std::lock_guard<std::mutex> lock(mMutex);
- if (!mSubscriptionInfo || mToken != myToken) {
- VLOG("ShellSubscriber: helper thread %d done!", myToken);
- return;
+ int64_t sleepTimeMs = 24 * 60 * 60 * 1000; // 24 hours.
+ const int64_t nowNanos = getElapsedRealtimeNs();
+ const int64_t nowMillis = nanoseconds_to_milliseconds(nowNanos);
+ const int64_t nowSecs = nanoseconds_to_seconds(nowNanos);
+ for (auto clientIt = mClientSet.begin(); clientIt != mClientSet.end();) {
+ int64_t subscriptionSleepMs =
+ (*clientIt)->pullAndSendHeartbeatsIfNeeded(nowSecs, nowMillis, nowNanos);
+ sleepTimeMs = std::min(sleepTimeMs, subscriptionSleepMs);
+ if ((*clientIt)->isAlive()) {
+ ++clientIt;
+ } else {
+ VLOG("ShellSubscriber: removing client!");
+ clientIt = mClientSet.erase(clientIt);
+ updateLogEventFilterLocked();
}
-
- int64_t nowMillis = getElapsedRealtimeMillis();
- int64_t nowNanos = getElapsedRealtimeNs();
- for (PullInfo& pullInfo : mSubscriptionInfo->mPulledInfo) {
- if (pullInfo.mPrevPullElapsedRealtimeMs + pullInfo.mInterval >= nowMillis) {
- continue;
- }
-
- vector<int32_t> uids;
- getUidsForPullAtom(&uids, pullInfo);
-
- vector<std::shared_ptr<LogEvent>> data;
- mPullerMgr->Pull(pullInfo.mPullerMatcher.atom_id(), uids, nowNanos, &data);
- VLOG("Pulled %zu atoms with id %d", data.size(), pullInfo.mPullerMatcher.atom_id());
- writePulledAtomsLocked(data, pullInfo.mPullerMatcher);
-
- pullInfo.mPrevPullElapsedRealtimeMs = nowMillis;
- }
-
- // Send a heartbeat, consisting of a data size of 0, if perfd hasn't recently received
- // data from statsd. When it receives the data size of 0, perfd will not expect any
- // atoms and recheck whether the subscription should end.
- if (nowMillis - mLastWriteMs > kMsBetweenHeartbeats) {
- attemptWriteToPipeLocked(/*dataSize=*/0);
- }
-
- // Determine how long to sleep before doing more work.
- for (PullInfo& pullInfo : mSubscriptionInfo->mPulledInfo) {
- int64_t nextPullTime = pullInfo.mPrevPullElapsedRealtimeMs + pullInfo.mInterval;
- int64_t timeBeforePull = nextPullTime - nowMillis; // guaranteed to be non-negative
- if (timeBeforePull < sleepTimeMs) sleepTimeMs = timeBeforePull;
- }
- int64_t timeBeforeHeartbeat = (mLastWriteMs + kMsBetweenHeartbeats) - nowMillis;
- if (timeBeforeHeartbeat < sleepTimeMs) sleepTimeMs = timeBeforeHeartbeat;
}
-
- VLOG("ShellSubscriber: helper thread %d sleeping for %lld ms", myToken,
- (long long)sleepTimeMs);
- std::this_thread::sleep_for(std::chrono::milliseconds(sleepTimeMs));
- }
-}
-
-void ShellSubscriber::getUidsForPullAtom(vector<int32_t>* uids, const PullInfo& pullInfo) {
- uids->insert(uids->end(), pullInfo.mPullUids.begin(), pullInfo.mPullUids.end());
- // This is slow. Consider storing the uids per app and listening to uidmap updates.
- for (const string& pkg : pullInfo.mPullPackages) {
- set<int32_t> uidsForPkg = mUidMap->getAppUid(pkg);
- uids->insert(uids->end(), uidsForPkg.begin(), uidsForPkg.end());
- }
- uids->push_back(DEFAULT_PULL_UID);
-}
-
-void ShellSubscriber::writePulledAtomsLocked(const vector<std::shared_ptr<LogEvent>>& data,
- const SimpleAtomMatcher& matcher) {
- mProto.clear();
- int count = 0;
- for (const auto& event : data) {
- if (matchesSimple(mUidMap, matcher, *event)) {
- count++;
- uint64_t atomToken = mProto.start(util::FIELD_TYPE_MESSAGE |
- util::FIELD_COUNT_REPEATED | FIELD_ID_ATOM);
- event->ToProto(mProto);
- mProto.end(atomToken);
+ if (mClientSet.empty()) {
+ mThreadAlive = false;
+ VLOG("ShellSubscriber: helper thread done!");
+ return;
}
+ VLOG("ShellSubscriber: helper thread sleeping for %" PRId64 "ms", sleepTimeMs);
+ mThreadSleepCV.wait_for(lock, sleepTimeMs * 1ms, [this] { return mClientSet.empty(); });
}
-
- if (count > 0) attemptWriteToPipeLocked(mProto.size());
}
void ShellSubscriber::onLogEvent(const LogEvent& event) {
- std::lock_guard<std::mutex> lock(mMutex);
- if (!mSubscriptionInfo) return;
-
- mProto.clear();
- for (const auto& matcher : mSubscriptionInfo->mPushedMatchers) {
- if (matchesSimple(mUidMap, matcher, event)) {
- uint64_t atomToken = mProto.start(util::FIELD_TYPE_MESSAGE |
- util::FIELD_COUNT_REPEATED | FIELD_ID_ATOM);
- event.ToProto(mProto);
- mProto.end(atomToken);
- attemptWriteToPipeLocked(mProto.size());
+    // Skip events whose body was not parsed (header-only)
+ if (event.isParsedHeaderOnly()) {
+ return;
+ }
+ // Skip RestrictedLogEvents
+ if (event.isRestricted()) {
+ return;
+ }
+ std::unique_lock<std::mutex> lock(mMutex);
+ for (auto clientIt = mClientSet.begin(); clientIt != mClientSet.end();) {
+ (*clientIt)->onLogEvent(event);
+ if ((*clientIt)->isAlive()) {
+ ++clientIt;
+ } else {
+ VLOG("ShellSubscriber: removing client!");
+ clientIt = mClientSet.erase(clientIt);
+ updateLogEventFilterLocked();
}
}
}
-// Tries to write the atom encoded in mProto to the pipe. If the write fails
-// because the read end of the pipe has closed, signals to other threads that
-// the subscription should end.
-void ShellSubscriber::attemptWriteToPipeLocked(size_t dataSize) {
- // First, write the payload size.
- if (!android::base::WriteFully(mSubscriptionInfo->mOutputFd, &dataSize, sizeof(dataSize))) {
- mSubscriptionInfo->mClientAlive = false;
- mSubscriptionShouldEnd.notify_one();
+void ShellSubscriber::flushSubscription(const shared_ptr<IStatsSubscriptionCallback>& callback) {
+ std::unique_lock<std::mutex> lock(mMutex);
+
+ // TODO(b/268822860): Consider storing callback clients in a map keyed by
+ // IStatsSubscriptionCallback to avoid this linear search.
+ for (auto clientIt = mClientSet.begin(); clientIt != mClientSet.end(); ++clientIt) {
+ if ((*clientIt)->hasCallback(callback)) {
+ if ((*clientIt)->isAlive()) {
+ (*clientIt)->flush();
+ } else {
+ VLOG("ShellSubscriber: removing client!");
+
+ // Erasing a value moves the iterator to the next value. The update expression also
+ // moves the iterator, skipping a value. This is fine because we do an early return
+                // before the next iteration of the loop.
+ clientIt = mClientSet.erase(clientIt);
+ updateLogEventFilterLocked();
+ }
+ return;
+ }
+ }
+}
+
+void ShellSubscriber::unsubscribe(const shared_ptr<IStatsSubscriptionCallback>& callback) {
+ std::unique_lock<std::mutex> lock(mMutex);
+
+ // TODO(b/268822860): Consider storing callback clients in a map keyed by
+ // IStatsSubscriptionCallback to avoid this linear search.
+ for (auto clientIt = mClientSet.begin(); clientIt != mClientSet.end(); ++clientIt) {
+ if ((*clientIt)->hasCallback(callback)) {
+ if ((*clientIt)->isAlive()) {
+ (*clientIt)->onUnsubscribe();
+ }
+ VLOG("ShellSubscriber: removing client!");
+
+ // Erasing a value moves the iterator to the next value. The update expression also
+ // moves the iterator, skipping a value. This is fine because we do an early return
+            // before the next iteration of the loop.
+ clientIt = mClientSet.erase(clientIt);
+ updateLogEventFilterLocked();
+ return;
+ }
+ }
+}
+
+void ShellSubscriber::updateLogEventFilterLocked() const {
+ VLOG("ShellSubscriber: Updating allAtomIds");
+ if (!mLogEventFilter) {
return;
}
-
- // Then, write the payload if this is not just a heartbeat.
- if (dataSize > 0 && !mProto.flush(mSubscriptionInfo->mOutputFd)) {
- mSubscriptionInfo->mClientAlive = false;
- mSubscriptionShouldEnd.notify_one();
- return;
+ LogEventFilter::AtomIdSet allAtomIds;
+ for (const auto& client : mClientSet) {
+ client->addAllAtomIds(allAtomIds);
}
-
- mLastWriteMs = getElapsedRealtimeMillis();
+ VLOG("ShellSubscriber: Updating allAtomIds done. Total atoms %d", (int)allAtomIds.size());
+ mLogEventFilter->setAtomIds(std::move(allAtomIds), this);
}
} // namespace statsd
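
The client-set maintenance above relies on two patterns worth calling out: the erase-while-iterating idiom for dropping dead clients, and a timed condition-variable wait for the heartbeat loop. A self-contained sketch of both, with a placeholder client type instead of the real ShellSubscriberClient:

#include <chrono>
#include <condition_variable>
#include <iostream>
#include <memory>
#include <mutex>
#include <set>

// Placeholder client; the real class is ShellSubscriberClient.
struct FakeClient {
    explicit FakeClient(bool isAlive) : alive(isAlive) {}
    bool isAlive() const { return alive; }
    bool alive;
};

int main() {
    std::mutex mutex;
    std::condition_variable threadSleepCV;
    std::set<std::unique_ptr<FakeClient>> clients;
    clients.insert(std::make_unique<FakeClient>(true));
    clients.insert(std::make_unique<FakeClient>(false));  // simulates a dead client

    std::unique_lock<std::mutex> lock(mutex);

    // Erase-while-iterating: set::erase returns the next iterator, so only
    // advance manually when the client is kept.
    for (auto it = clients.begin(); it != clients.end();) {
        if ((*it)->isAlive()) {
            ++it;
        } else {
            it = clients.erase(it);
        }
    }
    std::cout << "clients remaining: " << clients.size() << '\n';

    // Heartbeat-style sleep: wait up to the computed interval, but wake early
    // if the predicate (here, "no clients left") becomes true.
    const auto sleepTime = std::chrono::milliseconds(10);
    threadSleepCV.wait_for(lock, sleepTime, [&clients] { return clients.empty(); });
    return 0;
}
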
diff --git a/statsd/src/shell/ShellSubscriber.h b/statsd/src/shell/ShellSubscriber.h
index 826ad31..4ff8861 100644
--- a/statsd/src/shell/ShellSubscriber.h
+++ b/statsd/src/shell/ShellSubscriber.h
@@ -16,18 +16,17 @@
#pragma once
-#include <android/util/ProtoOutputStream.h>
-#include <private/android_filesystem_config.h>
+#include <aidl/android/os/IStatsSubscriptionCallback.h>
#include <condition_variable>
#include <mutex>
#include <thread>
#include "external/StatsPullerManager.h"
+#include "packages/UidMap.h"
+#include "shell/ShellSubscriberClient.h"
#include "src/shell/shell_config.pb.h"
#include "src/statsd_config.pb.h"
-#include "logd/LogEvent.h"
-#include "packages/UidMap.h"
namespace android {
namespace os {
@@ -54,91 +53,64 @@
* The stream would be in the following format:
* |size_t|shellData proto|size_t|shellData proto|....
*
- * Only one shell subscriber is allowed at a time because each shell subscriber blocks one thread
- * until it exits.
*/
class ShellSubscriber : public virtual RefBase {
public:
- ShellSubscriber(sp<UidMap> uidMap, sp<StatsPullerManager> pullerMgr)
- : mUidMap(uidMap), mPullerMgr(pullerMgr){};
+ ShellSubscriber(sp<UidMap> uidMap, sp<StatsPullerManager> pullerMgr,
+ const std::shared_ptr<LogEventFilter>& logEventFilter)
+ : mUidMap(uidMap), mPullerMgr(pullerMgr), mLogEventFilter(logEventFilter){};
- void startNewSubscription(int inFd, int outFd, int timeoutSec);
+ ~ShellSubscriber();
+
+ // Create new ShellSubscriberClient with file descriptors to manage a new subscription.
+ bool startNewSubscription(int inFd, int outFd, int64_t timeoutSec);
+
+ // Create new ShellSubscriberClient with Binder callback to manage a new subscription.
+ bool startNewSubscription(
+ const vector<uint8_t>& subscriptionConfig,
+ const shared_ptr<aidl::android::os::IStatsSubscriptionCallback>& callback);
void onLogEvent(const LogEvent& event);
+ void flushSubscription(
+ const shared_ptr<aidl::android::os::IStatsSubscriptionCallback>& callback);
+
+ void unsubscribe(const shared_ptr<aidl::android::os::IStatsSubscriptionCallback>& callback);
+
+ static size_t getMaxSizeKb() {
+ return ShellSubscriberClient::getMaxSizeKb();
+ }
+
+ static size_t getMaxSubscriptions() {
+ return kMaxSubscriptions;
+ }
+
private:
- struct PullInfo {
- PullInfo(const SimpleAtomMatcher& matcher, int64_t interval,
- const std::vector<std::string>& packages, const std::vector<int32_t>& uids)
- : mPullerMatcher(matcher),
- mInterval(interval),
- mPrevPullElapsedRealtimeMs(0),
- mPullPackages(packages),
- mPullUids(uids) {
- }
- SimpleAtomMatcher mPullerMatcher;
- int64_t mInterval;
- int64_t mPrevPullElapsedRealtimeMs;
- std::vector<std::string> mPullPackages;
- std::vector<int32_t> mPullUids;
- };
+ bool startNewSubscriptionLocked(unique_ptr<ShellSubscriberClient> client);
- struct SubscriptionInfo {
- SubscriptionInfo(const int& inputFd, const int& outputFd)
- : mInputFd(inputFd), mOutputFd(outputFd), mClientAlive(true) {
- }
+ void pullAndSendHeartbeats();
- int mInputFd;
- int mOutputFd;
- std::vector<SimpleAtomMatcher> mPushedMatchers;
- std::vector<PullInfo> mPulledInfo;
- bool mClientAlive;
- };
-
- int claimToken();
-
- bool readConfig(std::shared_ptr<SubscriptionInfo> subscriptionInfo);
-
- void spawnHelperThread(int myToken);
-
- void waitForSubscriptionToEndLocked(std::shared_ptr<SubscriptionInfo> myInfo,
- int myToken,
- std::unique_lock<std::mutex>& lock,
- int timeoutSec);
-
- // Helper thread that pulls atoms at a regular frequency and sends
- // heartbeats to perfd if statsd hasn't recently sent any data. Statsd must
- // send heartbeats for perfd to escape a blocking read call and recheck if
- // the user has terminated the subscription.
- void pullAndSendHeartbeats(int myToken);
-
- void writePulledAtomsLocked(const vector<std::shared_ptr<LogEvent>>& data,
- const SimpleAtomMatcher& matcher);
-
- void getUidsForPullAtom(vector<int32_t>* uids, const PullInfo& pullInfo);
-
- void attemptWriteToPipeLocked(size_t dataSize);
+ /* Tells LogEventFilter about atom ids to parse */
+ void updateLogEventFilterLocked() const;
sp<UidMap> mUidMap;
sp<StatsPullerManager> mPullerMgr;
- android::util::ProtoOutputStream mProto;
+ std::shared_ptr<LogEventFilter> mLogEventFilter;
+ // Protects mClientSet, mThreadAlive, and ShellSubscriberClient
mutable std::mutex mMutex;
- std::condition_variable mSubscriptionShouldEnd;
+ std::set<unique_ptr<ShellSubscriberClient>> mClientSet;
- std::shared_ptr<SubscriptionInfo> mSubscriptionInfo = nullptr;
+ bool mThreadAlive = false;
- int mToken = 0;
+ std::condition_variable mThreadSleepCV;
- const int32_t DEFAULT_PULL_UID = AID_SYSTEM;
+ std::thread mThread;
- // Tracks when we last send data to perfd. We need that time to determine
- // when next to send a heartbeat.
- int64_t mLastWriteMs = 0;
- const int64_t kMsBetweenHeartbeats = 1000;
+ static constexpr size_t kMaxSubscriptions = 20;
};
} // namespace statsd
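
The header comment above describes the output stream as |size_t|shellData proto|size_t|shellData proto|..., and the same size-then-payload framing is used when reading the subscription config from the input fd. A small sketch of that framing over an in-memory buffer, with stand-in byte strings instead of serialized protos:

#include <cstddef>
#include <cstring>
#include <iostream>
#include <string>
#include <vector>

// Append one |size_t|payload| frame to the stream (the payload stands in for a
// serialized proto message).
static void appendFrame(std::vector<char>& stream, const std::string& payload) {
    const size_t size = payload.size();
    const char* sizeBytes = reinterpret_cast<const char*>(&size);
    stream.insert(stream.end(), sizeBytes, sizeBytes + sizeof(size));
    stream.insert(stream.end(), payload.begin(), payload.end());
}

int main() {
    std::vector<char> stream;
    appendFrame(stream, "first blob");
    appendFrame(stream, "second blob");
    appendFrame(stream, "");  // a zero-sized frame is how heartbeats are signalled

    // Read frames back: the size header first, then exactly that many payload bytes.
    size_t offset = 0;
    while (offset + sizeof(size_t) <= stream.size()) {
        size_t size = 0;
        std::memcpy(&size, stream.data() + offset, sizeof(size));
        offset += sizeof(size);
        std::string payload(stream.data() + offset, size);
        offset += size;
        std::cout << "frame of " << size << " byte(s): " << payload << '\n';
    }
    return 0;
}
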
diff --git a/statsd/src/shell/ShellSubscriberClient.cpp b/statsd/src/shell/ShellSubscriberClient.cpp
new file mode 100644
index 0000000..546ed16
--- /dev/null
+++ b/statsd/src/shell/ShellSubscriberClient.cpp
@@ -0,0 +1,364 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define STATSD_DEBUG false // STOPSHIP if true
+#include "Log.h"
+
+#include "ShellSubscriberClient.h"
+
+#include "FieldValue.h"
+#include "matchers/matcher_util.h"
+#include "stats_log_util.h"
+
+using android::base::unique_fd;
+using android::util::ProtoOutputStream;
+using Status = ::ndk::ScopedAStatus;
+
+namespace android {
+namespace os {
+namespace statsd {
+
+const static int FIELD_ID_SHELL_DATA__ATOM = 1;
+const static int FIELD_ID_SHELL_DATA__ELAPSED_TIMESTAMP_NANOS = 2;
+
+struct ReadConfigResult {
+ vector<SimpleAtomMatcher> pushedMatchers;
+ vector<ShellSubscriberClient::PullInfo> pullInfo;
+};
+
+// Reads and parses a single config. There should be only one config in the input.
+static optional<ReadConfigResult> readConfig(const vector<uint8_t>& configBytes,
+ int64_t startTimeMs, int64_t minPullIntervalMs) {
+ // Parse the config.
+ ShellSubscription config;
+ if (!config.ParseFromArray(configBytes.data(), configBytes.size())) {
+ ALOGE("ShellSubscriberClient: failed to parse the config");
+ return nullopt;
+ }
+
+ ReadConfigResult result;
+
+ result.pushedMatchers.assign(config.pushed().begin(), config.pushed().end());
+
+ vector<ShellSubscriberClient::PullInfo> pullInfo;
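+ // Package names that are known Android AIDs resolve directly to uids here; any other
+ // package is kept and resolved against the UidMap at pull time (see getUidsForPullAtom).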
+ for (const auto& pulled : config.pulled()) {
+ vector<string> packages;
+ vector<int32_t> uids;
+ for (const string& pkg : pulled.packages()) {
+ auto it = UidMap::sAidToUidMapping.find(pkg);
+ if (it != UidMap::sAidToUidMapping.end()) {
+ uids.push_back(it->second);
+ } else {
+ packages.push_back(pkg);
+ }
+ }
+
+ const int64_t pullIntervalMs = max(pulled.freq_millis(), minPullIntervalMs);
+ result.pullInfo.emplace_back(pulled.matcher(), startTimeMs, pullIntervalMs, packages, uids);
+ ALOGD("ShellSubscriberClient: adding matcher for pulled atom %d",
+ pulled.matcher().atom_id());
+ }
+
+ return result;
+}
+
+ShellSubscriberClient::PullInfo::PullInfo(const SimpleAtomMatcher& matcher, int64_t startTimeMs,
+ int64_t intervalMs,
+ const std::vector<std::string>& packages,
+ const std::vector<int32_t>& uids)
+ : mPullerMatcher(matcher),
+ mIntervalMs(intervalMs),
+ mPrevPullElapsedRealtimeMs(startTimeMs),
+ mPullPackages(packages),
+ mPullUids(uids) {
+}
+
+ShellSubscriberClient::ShellSubscriberClient(
+ int out, const std::shared_ptr<IStatsSubscriptionCallback>& callback,
+ const std::vector<SimpleAtomMatcher>& pushedMatchers,
+ const std::vector<PullInfo>& pulledInfo, int64_t timeoutSec, int64_t startTimeSec,
+ const sp<UidMap>& uidMap, const sp<StatsPullerManager>& pullerMgr)
+ : mUidMap(uidMap),
+ mPullerMgr(pullerMgr),
+ mDupOut(fcntl(out, F_DUPFD_CLOEXEC, 0)),
+ mPushedMatchers(pushedMatchers),
+ mPulledInfo(pulledInfo),
+ mCallback(callback),
+ mTimeoutSec(timeoutSec),
+ mStartTimeSec(startTimeSec),
+ mLastWriteMs(startTimeSec * 1000),
+ mCacheSize(0){};
+
+unique_ptr<ShellSubscriberClient> ShellSubscriberClient::create(
+ int in, int out, int64_t timeoutSec, int64_t startTimeSec, const sp<UidMap>& uidMap,
+ const sp<StatsPullerManager>& pullerMgr) {
+ // Read the size of the config.
+ size_t bufferSize;
+ if (!android::base::ReadFully(in, &bufferSize, sizeof(bufferSize))) {
+ return nullptr;
+ }
+
+ // Check bufferSize
+ if (bufferSize > (kMaxSizeKb * 1024)) {
+ ALOGE("ShellSubscriberClient: received config (%zu bytes) is larger than the max size (%zu "
+ "bytes)",
+ bufferSize, (kMaxSizeKb * 1024));
+ return nullptr;
+ }
+
+ // Read the config.
+ vector<uint8_t> buffer(bufferSize);
+ if (!android::base::ReadFully(in, buffer.data(), bufferSize)) {
+ ALOGE("ShellSubscriberClient: failed to read the config from file descriptor");
+ return nullptr;
+ }
+
+ const optional<ReadConfigResult> readConfigResult =
+ readConfig(buffer, startTimeSec * 1000, /* minPullIntervalMs */ 0);
+ if (!readConfigResult.has_value()) {
+ return nullptr;
+ }
+
+ return make_unique<ShellSubscriberClient>(
+ out, /*callback=*/nullptr, readConfigResult->pushedMatchers, readConfigResult->pullInfo,
+ timeoutSec, startTimeSec, uidMap, pullerMgr);
+}
+
+unique_ptr<ShellSubscriberClient> ShellSubscriberClient::create(
+ const vector<uint8_t>& subscriptionConfig,
+ const shared_ptr<IStatsSubscriptionCallback>& callback, int64_t startTimeSec,
+ const sp<UidMap>& uidMap, const sp<StatsPullerManager>& pullerMgr) {
+ if (callback == nullptr) {
+ ALOGE("ShellSubscriberClient: received nullptr callback");
+ return nullptr;
+ }
+
+ if (subscriptionConfig.size() > (kMaxSizeKb * 1024)) {
+ ALOGE("ShellSubscriberClient: received config (%zu bytes) is larger than the max size (%zu "
+ "bytes)",
+ subscriptionConfig.size(), (kMaxSizeKb * 1024));
+ return nullptr;
+ }
+
+ const optional<ReadConfigResult> readConfigResult =
+ readConfig(subscriptionConfig, startTimeSec * 1000,
+ ShellSubscriberClient::kMinCallbackPullIntervalMs);
+ if (!readConfigResult.has_value()) {
+ return nullptr;
+ }
+
+ return make_unique<ShellSubscriberClient>(
+ /*out=*/-1, callback, readConfigResult->pushedMatchers, readConfigResult->pullInfo,
+ /*timeoutSec=*/-1, startTimeSec, uidMap, pullerMgr);
+}
+
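+// Appends the event to mProtoOut if it matches the given matcher, records its elapsed
+// timestamp, and updates the cached-data byte count. Returns true if the event was cached.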
+bool ShellSubscriberClient::writeEventToProtoIfMatched(const LogEvent& event,
+ const SimpleAtomMatcher& matcher,
+ const sp<UidMap>& uidMap) {
+ if (!matchesSimple(uidMap, matcher, event)) {
+ return false;
+ }
+
+ // Cache atom event in mProtoOut.
+ uint64_t atomToken = mProtoOut.start(util::FIELD_TYPE_MESSAGE | util::FIELD_COUNT_REPEATED |
+ FIELD_ID_SHELL_DATA__ATOM);
+ event.ToProto(mProtoOut);
+ mProtoOut.end(atomToken);
+
+ const int64_t timestampNs = truncateTimestampIfNecessary(event);
+ mProtoOut.write(util::FIELD_TYPE_INT64 | util::FIELD_COUNT_REPEATED |
+ FIELD_ID_SHELL_DATA__ELAPSED_TIMESTAMP_NANOS,
+ static_cast<long long>(timestampNs));
+
+ // Update byte size of cached data.
+ mCacheSize += getSize(event.getValues()) + sizeof(timestampNs);
+
+ return true;
+}
+
+// Called by ShellSubscriber when a pushed event occurs
+void ShellSubscriberClient::onLogEvent(const LogEvent& event) {
+ for (const auto& matcher : mPushedMatchers) {
+ if (writeEventToProtoIfMatched(event, matcher, mUidMap)) {
+ flushProtoIfNeeded();
+ break;
+ }
+ }
+}
+
+void ShellSubscriberClient::flushProtoIfNeeded() {
+ if (mCallback == nullptr) { // Using file descriptor.
+ triggerFdFlush();
+ } else if (mCacheSize >= kMaxCacheSizeBytes) { // Using callback.
+ // Flush data if cache is full.
+ triggerCallback(StatsSubscriptionCallbackReason::STATSD_INITIATED);
+ }
+}
+
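+// Performs any pulls whose interval has elapsed and returns how long, in milliseconds, the
+// subscriber thread can sleep before the next pull is due.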
+int64_t ShellSubscriberClient::pullIfNeeded(int64_t nowSecs, int64_t nowMillis, int64_t nowNanos) {
+ int64_t sleepTimeMs = 24 * 60 * 60 * 1000; // 24 hours.
+ for (PullInfo& pullInfo : mPulledInfo) {
+ if (pullInfo.mPrevPullElapsedRealtimeMs + pullInfo.mIntervalMs <= nowMillis) {
+ vector<int32_t> uids;
+ getUidsForPullAtom(&uids, pullInfo);
+
+ vector<shared_ptr<LogEvent>> data;
+ mPullerMgr->Pull(pullInfo.mPullerMatcher.atom_id(), uids, nowNanos, &data);
+ VLOG("ShellSubscriberClient: pulled %zu atoms with id %d", data.size(),
+ pullInfo.mPullerMatcher.atom_id());
+
+ writePulledAtomsLocked(data, pullInfo.mPullerMatcher);
+ pullInfo.mPrevPullElapsedRealtimeMs = nowMillis;
+ }
+
+ // Determine how long to sleep before doing more work.
+ const int64_t nextPullTimeMs = pullInfo.mPrevPullElapsedRealtimeMs + pullInfo.mIntervalMs;
+
+ const int64_t timeBeforePullMs =
+ nextPullTimeMs - nowMillis; // guaranteed to be non-negative
+ sleepTimeMs = min(sleepTimeMs, timeBeforePullMs);
+ }
+ return sleepTimeMs;
+}
+
+// The ShellSubscriber pull-and-heartbeat thread sleeps for the minimum time
+// returned across all of its clients.
+int64_t ShellSubscriberClient::pullAndSendHeartbeatsIfNeeded(int64_t nowSecs, int64_t nowMillis,
+ int64_t nowNanos) {
+ int64_t sleepTimeMs;
+ if (mCallback == nullptr) { // File descriptor subscription
+ if ((nowSecs - mStartTimeSec >= mTimeoutSec) && (mTimeoutSec > 0)) {
+ mClientAlive = false;
+ return kMsBetweenHeartbeats;
+ }
+
+ sleepTimeMs = min(kMsBetweenHeartbeats, pullIfNeeded(nowSecs, nowMillis, nowNanos));
+
+ // Send a heartbeat, i.e. a payload size of 0, if the subscriber hasn't received data
+ // from statsd recently. On reading a size of 0, the subscriber knows not to expect any
+ // atoms and rechecks whether the subscription should end.
+ if (nowMillis - mLastWriteMs >= kMsBetweenHeartbeats) {
+ triggerFdFlush();
+ if (!mClientAlive) return kMsBetweenHeartbeats;
+ }
+
+ int64_t timeBeforeHeartbeat = mLastWriteMs + kMsBetweenHeartbeats - nowMillis;
+ sleepTimeMs = min(sleepTimeMs, timeBeforeHeartbeat);
+ } else { // Callback subscription.
+ sleepTimeMs = min(kMsBetweenCallbacks, pullIfNeeded(nowSecs, nowMillis, nowNanos));
+
+ if (mCacheSize > 0 && nowMillis - mLastWriteMs >= kMsBetweenCallbacks) {
+ // Flush data if cache has kept data for longer than kMsBetweenCallbacks.
+ triggerCallback(StatsSubscriptionCallbackReason::STATSD_INITIATED);
+ }
+
+ // Cache should be flushed kMsBetweenCallbacks after mLastWrite.
+ const int64_t timeToCallbackMs = mLastWriteMs + kMsBetweenCallbacks - nowMillis;
+
+ // For callback subscriptions, ensure minimum sleep time is at least
+ // kMinCallbackSleepIntervalMs. Even if there is less than kMinCallbackSleepIntervalMs left
+ // before next pull time, sleep for at least kMinCallbackSleepIntervalMs. This has the
+ // effect of multiple pulled atoms that have a pull within kMinCallbackSleepIntervalMs from
+ // now to have their pulls batched together, mitigating frequent wakeups of the puller
+ // thread.
+ sleepTimeMs = max(kMinCallbackSleepIntervalMs, min(sleepTimeMs, timeToCallbackMs));
+ }
+ return sleepTimeMs;
+}
+
+void ShellSubscriberClient::writePulledAtomsLocked(const vector<shared_ptr<LogEvent>>& data,
+ const SimpleAtomMatcher& matcher) {
+ bool hasData = false;
+ for (const shared_ptr<LogEvent>& event : data) {
+ if (writeEventToProtoIfMatched(*event, matcher, mUidMap)) {
+ hasData = true;
+ }
+ }
+
+ if (hasData) {
+ flushProtoIfNeeded();
+ }
+}
+
+// Tries to write the atoms encoded in mProtoOut to the pipe. If the write fails because the
+// read end of the pipe has closed, the client status is updated so the manager knows the
+// subscription is no longer active.
+void ShellSubscriberClient::attemptWriteToPipeLocked() {
+ const size_t dataSize = mProtoOut.size();
+ // First, write the payload size.
+ if (!android::base::WriteFully(mDupOut, &dataSize, sizeof(dataSize))) {
+ mClientAlive = false;
+ return;
+ }
+ // Then, write the payload if this is not just a heartbeat.
+ if (dataSize > 0 && !mProtoOut.flush(mDupOut.get())) {
+ mClientAlive = false;
+ return;
+ }
+ mLastWriteMs = getElapsedRealtimeMillis();
+}
+
+void ShellSubscriberClient::getUidsForPullAtom(vector<int32_t>* uids, const PullInfo& pullInfo) {
+ uids->insert(uids->end(), pullInfo.mPullUids.begin(), pullInfo.mPullUids.end());
+ // This is slow. Consider storing the uids per app and listening to uidmap updates.
+ for (const string& pkg : pullInfo.mPullPackages) {
+ set<int32_t> uidsForPkg = mUidMap->getAppUid(pkg);
+ uids->insert(uids->end(), uidsForPkg.begin(), uidsForPkg.end());
+ }
+ uids->push_back(DEFAULT_PULL_UID);
+}
+
+void ShellSubscriberClient::clearCache() {
+ mProtoOut.clear();
+ mCacheSize = 0;
+}
+
+void ShellSubscriberClient::triggerFdFlush() {
+ attemptWriteToPipeLocked();
+ clearCache();
+}
+
+void ShellSubscriberClient::triggerCallback(StatsSubscriptionCallbackReason reason) {
+ // Invoke Binder callback with cached event data.
+ vector<uint8_t> payloadBytes;
+ mProtoOut.serializeToVector(&payloadBytes);
+ const Status status = mCallback->onSubscriptionData(reason, payloadBytes);
+ if (status.getStatus() == STATUS_DEAD_OBJECT &&
+ status.getExceptionCode() == EX_TRANSACTION_FAILED) {
+ mClientAlive = false;
+ return;
+ }
+
+ mLastWriteMs = getElapsedRealtimeMillis();
+ clearCache();
+}
+
+void ShellSubscriberClient::flush() {
+ triggerCallback(StatsSubscriptionCallbackReason::FLUSH_REQUESTED);
+}
+
+void ShellSubscriberClient::onUnsubscribe() {
+ triggerCallback(StatsSubscriptionCallbackReason::SUBSCRIPTION_ENDED);
+}
+
+void ShellSubscriberClient::addAllAtomIds(LogEventFilter::AtomIdSet& allAtomIds) const {
+ for (const auto& matcher : mPushedMatchers) {
+ allAtomIds.insert(matcher.atom_id());
+ }
+}
+
+} // namespace statsd
+} // namespace os
+} // namespace android
diff --git a/statsd/src/shell/ShellSubscriberClient.h b/statsd/src/shell/ShellSubscriberClient.h
new file mode 100644
index 0000000..9d03cb4
--- /dev/null
+++ b/statsd/src/shell/ShellSubscriberClient.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/os/IStatsSubscriptionCallback.h>
+#include <aidl/android/os/StatsSubscriptionCallbackReason.h>
+#include <android-base/file.h>
+#include <android/util/ProtoOutputStream.h>
+#include <private/android_filesystem_config.h>
+
+#include <memory>
+
+#include "external/StatsPullerManager.h"
+#include "logd/LogEvent.h"
+#include "packages/UidMap.h"
+#include "socket/LogEventFilter.h"
+#include "src/shell/shell_config.pb.h"
+#include "src/statsd_config.pb.h"
+
+using aidl::android::os::IStatsSubscriptionCallback;
+using aidl::android::os::StatsSubscriptionCallbackReason;
+
+namespace android {
+namespace os {
+namespace statsd {
+
+// ShellSubscriberClient is not thread-safe. All calls must be
+// guarded by the mutex in ShellSubscriber.h
+class ShellSubscriberClient {
+public:
+ struct PullInfo {
+ PullInfo(const SimpleAtomMatcher& matcher, int64_t startTimeMs, int64_t interval,
+ const std::vector<std::string>& packages, const std::vector<int32_t>& uids);
+
+ const SimpleAtomMatcher mPullerMatcher;
+ const int64_t mIntervalMs;
+ int64_t mPrevPullElapsedRealtimeMs;
+ const std::vector<std::string> mPullPackages;
+ const std::vector<int32_t> mPullUids;
+ };
+
+ static std::unique_ptr<ShellSubscriberClient> create(int in, int out, int64_t timeoutSec,
+ int64_t startTimeSec,
+ const sp<UidMap>& uidMap,
+ const sp<StatsPullerManager>& pullerMgr);
+
+ static std::unique_ptr<ShellSubscriberClient> create(
+ const std::vector<uint8_t>& subscriptionConfig,
+ const std::shared_ptr<IStatsSubscriptionCallback>& callback, int64_t startTimeSec,
+ const sp<UidMap>& uidMap, const sp<StatsPullerManager>& pullerMgr);
+
+ // Should only be called by the create() factory.
+ explicit ShellSubscriberClient(int out,
+ const std::shared_ptr<IStatsSubscriptionCallback>& callback,
+ const std::vector<SimpleAtomMatcher>& pushedMatchers,
+ const std::vector<PullInfo>& pulledInfo, int64_t timeoutSec,
+ int64_t startTimeSec, const sp<UidMap>& uidMap,
+ const sp<StatsPullerManager>& pullerMgr);
+
+ void onLogEvent(const LogEvent& event);
+
+ int64_t pullAndSendHeartbeatsIfNeeded(int64_t nowSecs, int64_t nowMillis, int64_t nowNanos);
+
+ // Should only be called when mCallback is not nullptr.
+ void flush();
+
+ // Should only be called when mCallback is not nullptr.
+ void onUnsubscribe();
+
+ bool isAlive() const {
+ return mClientAlive;
+ }
+
+ bool hasCallback(const std::shared_ptr<IStatsSubscriptionCallback>& callback) const {
+ return mCallback != nullptr && callback != nullptr &&
+ callback->asBinder() == mCallback->asBinder();
+ }
+
+ static size_t getMaxSizeKb() {
+ return kMaxSizeKb;
+ }
+
+ void addAllAtomIds(LogEventFilter::AtomIdSet& allAtomIds) const;
+
+ // Minimum pull interval for callback subscriptions.
+ static constexpr int64_t kMinCallbackPullIntervalMs = 60'000; // 60 seconds.
+
+ // Minimum sleep for the pull thread for callback subscriptions.
+ static constexpr int64_t kMinCallbackSleepIntervalMs = 2000; // 2 seconds.
+private:
+ int64_t pullIfNeeded(int64_t nowSecs, int64_t nowMillis, int64_t nowNanos);
+
+ void writePulledAtomsLocked(const vector<std::shared_ptr<LogEvent>>& data,
+ const SimpleAtomMatcher& matcher);
+
+ void attemptWriteToPipeLocked();
+
+ void getUidsForPullAtom(vector<int32_t>* uids, const PullInfo& pullInfo);
+
+ void flushProtoIfNeeded();
+
+ bool writeEventToProtoIfMatched(const LogEvent& event, const SimpleAtomMatcher& matcher,
+ const sp<UidMap>& uidMap);
+
+ void clearCache();
+
+ void triggerFdFlush();
+
+ void triggerCallback(StatsSubscriptionCallbackReason reason);
+
+ const int32_t DEFAULT_PULL_UID = AID_SYSTEM;
+
+ const sp<UidMap> mUidMap;
+
+ const sp<StatsPullerManager> mPullerMgr;
+
+ android::base::unique_fd mDupOut;
+
+ const std::vector<SimpleAtomMatcher> mPushedMatchers;
+
+ std::vector<PullInfo> mPulledInfo;
+
+ std::shared_ptr<IStatsSubscriptionCallback> mCallback;
+
+ const int64_t mTimeoutSec;
+
+ const int64_t mStartTimeSec;
+
+ bool mClientAlive = true;
+
+ int64_t mLastWriteMs;
+
+ // Stores Atom proto messages for events along with their respective timestamps.
+ ProtoOutputStream mProtoOut;
+
+ // Approximate total encoded byte-size of the Atom events currently cached in mProtoOut.
+ size_t mCacheSize;
+
+ static constexpr int64_t kMsBetweenHeartbeats = 1000;
+
+ // Cap the buffer size of configs to guard against bad allocations
+ static constexpr size_t kMaxSizeKb = 50;
+
+ static constexpr size_t kMaxCacheSizeBytes = 2 * 1024; // 2 KB
+
+ static constexpr int64_t kMsBetweenCallbacks = 70'000; // 70 seconds.
+};
+
+} // namespace statsd
+} // namespace os
+} // namespace android
diff --git a/statsd/src/shell/shell_config.proto b/statsd/src/shell/shell_config.proto
index 9c18c4f..6b2890c 100644
--- a/statsd/src/shell/shell_config.proto
+++ b/statsd/src/shell/shell_config.proto
@@ -27,7 +27,7 @@
optional SimpleAtomMatcher matcher = 1;
/* gap between two pulls in milliseconds */
- optional int32 freq_millis = 2;
+ optional int64 freq_millis = 2;
/* Packages that the pull is requested from */
repeated string packages = 3;
@@ -36,4 +36,4 @@
message ShellSubscription {
repeated SimpleAtomMatcher pushed = 1;
repeated PulledAtomSubscription pulled = 2;
-}
\ No newline at end of file
+}
diff --git a/statsd/src/shell/shell_data.proto b/statsd/src/shell/shell_data.proto
index ec41cbc..fdfbcc3 100644
--- a/statsd/src/shell/shell_data.proto
+++ b/statsd/src/shell/shell_data.proto
@@ -26,4 +26,5 @@
// The output of shell subscription, including both pulled and pushed subscriptions.
message ShellData {
repeated Atom atom = 1;
+ repeated int64 elapsed_timestamp_nanos = 2 [packed = true];
}
diff --git a/statsd/src/socket/LogEventFilter.h b/statsd/src/socket/LogEventFilter.h
new file mode 100644
index 0000000..aec91ef
--- /dev/null
+++ b/statsd/src/socket/LogEventFilter.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <gtest/gtest_prod.h>
+
+#include <atomic>
+#include <mutex>
+#include <unordered_map>
+#include <unordered_set>
+
+namespace android {
+namespace os {
+namespace statsd {
+
+/**
+ * Templating is for benchmarks only
+ *
+ * Based on benchmarks, the fastest container for atom id filtering
+ * is unordered_set<int>:
+ * #BM_LogEventFilterUnorderedSet 391208 ns 390086 ns 1793
+ * #BM_LogEventFilterUnorderedSet2Consumers 1293527 ns 1289326 ns 543
+ * #BM_LogEventFilterSet 613362 ns 611259 ns 1146
+ * #BM_LogEventFilterSet2Consumers 1859397 ns 1854193 ns 378
+ *
+ * See @LogEventFilter definition below
+ */
+template <typename T>
+class LogEventFilterGeneric {
+public:
+ virtual ~LogEventFilterGeneric() = default;
+
+ virtual void setFilteringEnabled(bool isEnabled) {
+ mLogsFilteringEnabled = isEnabled;
+ }
+
+ bool getFilteringEnabled() const {
+ return mLogsFilteringEnabled;
+ }
+
+ /**
+ * @brief Tests an atom id against the set of interesting atoms.
+ * If log filtering is disabled, all atoms are assumed to be in use.
+ * Usually a non-blocking call; it only blocks when setAtomIds() has been called since the
+ * last check and the local copy of the atom id set needs to be synced up.
+ * @param atomId atom id to test
+ * @return true if the atom is used by any consumer or filtering is disabled
+ */
+ virtual bool isAtomInUse(int atomId) const {
+ if (!mLogsFilteringEnabled) {
+ return true;
+ }
+
+ // check if there is an updated set of interesting atom ids
+ if (mLocalSetUpdateCounter != mSetUpdateCounter.load(std::memory_order_relaxed)) {
+ std::lock_guard<std::mutex> guard(mTagIdsMutex);
+ mLocalSetUpdateCounter = mSetUpdateCounter.load(std::memory_order_relaxed);
+ mLocalTagIds.swap(mTagIds);
+ }
+ return mLocalTagIds.find(atomId) != mLocalTagIds.end();
+ }
+
+ typedef const void* ConsumerId;
+
+ typedef T AtomIdSet;
+ /**
+ * @brief Sets the atom ids requested by a single consumer.
+ *
+ * @param tagIds set of atom ids; an empty set removes this consumer's entry
+ * @param consumer used to distinguish consumers so that a proper superset of ids is formed
+ */
+ virtual void setAtomIds(AtomIdSet tagIds, ConsumerId consumer) {
+ std::lock_guard lock(mTagIdsMutex);
+ // update ids list from consumer
+ if (tagIds.size() == 0) {
+ mTagIdsPerConsumer.erase(consumer);
+ } else {
+ mTagIdsPerConsumer[consumer].swap(tagIds);
+ }
+ // populate the superset incorporating list of distinct atom ids from all consumers
+ mTagIds.clear();
+ for (const auto& [_, atomIds] : mTagIdsPerConsumer) {
+ mTagIds.insert(atomIds.begin(), atomIds.end());
+ }
+ mSetUpdateCounter.fetch_add(1, std::memory_order_relaxed);
+ }
+
+private:
+ std::atomic_bool mLogsFilteringEnabled = true;
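+ // Bumped by setAtomIds(); isAtomInUse() compares it against mLocalSetUpdateCounter to
+ // detect when the local copy of the atom id set is stale and needs to be re-synced.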
+ std::atomic_int mSetUpdateCounter;
+ mutable int mLocalSetUpdateCounter;
+
+ mutable std::mutex mTagIdsMutex;
+ std::unordered_map<ConsumerId, AtomIdSet> mTagIdsPerConsumer;
+ mutable AtomIdSet mTagIds;
+ mutable AtomIdSet mLocalTagIds;
+
+ friend class LogEventFilterTest;
+
+ FRIEND_TEST(LogEventFilterTest, TestEmptyFilter);
+ FRIEND_TEST(LogEventFilterTest, TestRemoveNonExistingEmptyFilter);
+ FRIEND_TEST(LogEventFilterTest, TestEmptyFilterDisabled);
+ FRIEND_TEST(LogEventFilterTest, TestEmptyFilterDisabledSetter);
+ FRIEND_TEST(LogEventFilterTest, TestNonEmptyFilterFullOverlap);
+ FRIEND_TEST(LogEventFilterTest, TestNonEmptyFilterPartialOverlap);
+ FRIEND_TEST(LogEventFilterTest, TestNonEmptyFilterDisabled);
+ FRIEND_TEST(LogEventFilterTest, TestNonEmptyFilterDisabledPartialOverlap);
+ FRIEND_TEST(LogEventFilterTest, TestMultipleConsumerOverlapIds);
+ FRIEND_TEST(LogEventFilterTest, TestMultipleConsumerNonOverlapIds);
+ FRIEND_TEST(LogEventFilterTest, TestMultipleConsumerOverlapIdsRemoved);
+ FRIEND_TEST(LogEventFilterTest, TestMultipleConsumerNonOverlapIdsRemoved);
+ FRIEND_TEST(LogEventFilterTest, TestMultipleConsumerEmptyFilter);
+};
+
+typedef LogEventFilterGeneric<std::unordered_set<int>> LogEventFilter;
+
+} // namespace statsd
+} // namespace os
+} // namespace android
diff --git a/statsd/src/socket/StatsSocketListener.cpp b/statsd/src/socket/StatsSocketListener.cpp
old mode 100755
new mode 100644
index 7b46573..5873df3
--- a/statsd/src/socket/StatsSocketListener.cpp
+++ b/statsd/src/socket/StatsSocketListener.cpp
@@ -36,11 +36,11 @@
namespace os {
namespace statsd {
-StatsSocketListener::StatsSocketListener(std::shared_ptr<LogEventQueue> queue)
- : SocketListener(getLogSocket(), false /*start listen*/), mQueue(queue) {
-}
-
-StatsSocketListener::~StatsSocketListener() {
+StatsSocketListener::StatsSocketListener(std::shared_ptr<LogEventQueue> queue,
+ const std::shared_ptr<LogEventFilter>& logEventFilter)
+ : SocketListener(getLogSocket(), false /*start listen*/),
+ mQueue(std::move(queue)),
+ mLogEventFilter(logEventFilter) {
}
bool StatsSocketListener::onDataAvailable(SocketClient* cli) {
@@ -120,22 +120,38 @@
}
// move past the 4-byte StatsEventTag
- uint8_t* msg = ptr + sizeof(uint32_t);
- uint32_t len = n - sizeof(uint32_t);
- uint32_t uid = cred->uid;
- uint32_t pid = cred->pid;
+ const uint8_t* msg = ptr + sizeof(uint32_t);
+ const uint32_t len = n - sizeof(uint32_t);
+ const uint32_t uid = cred->uid;
+ const uint32_t pid = cred->pid;
- int64_t oldestTimestamp;
- std::unique_ptr<LogEvent> logEvent = std::make_unique<LogEvent>(uid, pid);
- logEvent->parseBuffer(msg, len);
-
- if (!mQueue->push(std::move(logEvent), &oldestTimestamp)) {
- StatsdStats::getInstance().noteEventQueueOverflow(oldestTimestamp);
- }
+ processMessage(msg, len, uid, pid, mQueue, mLogEventFilter);
return true;
}
+void StatsSocketListener::processMessage(const uint8_t* msg, uint32_t len, uint32_t uid,
+ uint32_t pid, const std::shared_ptr<LogEventQueue>& queue,
+ const std::shared_ptr<LogEventFilter>& filter) {
+ std::unique_ptr<LogEvent> logEvent = std::make_unique<LogEvent>(uid, pid);
+
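+ // With filtering enabled, parse only the header first; the body is parsed only if the
+ // atom id is of interest to at least one consumer.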
+ if (filter && filter->getFilteringEnabled()) {
+ const LogEvent::BodyBufferInfo bodyInfo = logEvent->parseHeader(msg, len);
+ if (filter->isAtomInUse(logEvent->GetTagId())) {
+ logEvent->parseBody(bodyInfo);
+ }
+ } else {
+ logEvent->parseBuffer(msg, len);
+ }
+
+ const int32_t atomId = logEvent->GetTagId();
+ const bool isAtomSkipped = logEvent->isParsedHeaderOnly();
+ int64_t oldestTimestamp;
+ if (!queue->push(std::move(logEvent), &oldestTimestamp)) {
+ StatsdStats::getInstance().noteEventQueueOverflow(oldestTimestamp, atomId, isAtomSkipped);
+ }
+}
+
int StatsSocketListener::getLogSocket() {
static const char socketName[] = "statsdw";
int sock = android_get_control_socket(socketName);
diff --git a/statsd/src/socket/StatsSocketListener.h b/statsd/src/socket/StatsSocketListener.h
index 43822f3..74a338e 100644
--- a/statsd/src/socket/StatsSocketListener.h
+++ b/statsd/src/socket/StatsSocketListener.h
@@ -15,8 +15,11 @@
*/
#pragma once
+#include <gtest/gtest_prod.h>
#include <sysutils/SocketListener.h>
#include <utils/RefBase.h>
+
+#include "LogEventFilter.h"
#include "logd/LogEventQueue.h"
// DEFAULT_OVERFLOWUID is defined in linux/highuid.h, which is not part of
@@ -35,20 +38,53 @@
class StatsSocketListener : public SocketListener, public virtual RefBase {
public:
- explicit StatsSocketListener(std::shared_ptr<LogEventQueue> queue);
+ explicit StatsSocketListener(std::shared_ptr<LogEventQueue> queue,
+ const std::shared_ptr<LogEventFilter>& logEventFilter);
- virtual ~StatsSocketListener();
+ virtual ~StatsSocketListener() = default;
protected:
- virtual bool onDataAvailable(SocketClient* cli);
+ bool onDataAvailable(SocketClient* cli) override;
private:
static int getLogSocket();
+
+ /**
+ * @brief Helper API to parse a buffer, build the LogEvent and submit it to the queue.
+ * Kept as a separate static API so it can be tested without a StatsSocketListener instance.
+ *
+ * @param msg buffer to parse
+ * @param len size of the buffer in bytes
+ * @param uid uid of the logging process, passed to the LogEvent constructor
+ * @param pid pid of the logging process, passed to the LogEvent constructor
+ * @param queue queue the event is submitted to
+ * @param filter filter used to decide whether the event body needs to be parsed
+ */
+ static void processMessage(const uint8_t* msg, uint32_t len, uint32_t uid, uint32_t pid,
+ const std::shared_ptr<LogEventQueue>& queue,
+ const std::shared_ptr<LogEventFilter>& filter);
+
/**
* Who is going to get the events when they're read.
*/
std::shared_ptr<LogEventQueue> mQueue;
+
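+ /**
+ * Filter used to decide whether an incoming event's body needs to be parsed.
+ */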
+ std::shared_ptr<LogEventFilter> mLogEventFilter;
+
+ friend class SocketParseMessageTest;
+ friend void generateAtomLogging(const std::shared_ptr<LogEventQueue>& queue,
+ const std::shared_ptr<LogEventFilter>& filter, int eventCount,
+ int startAtomId);
+
+ FRIEND_TEST(SocketParseMessageTestNoFiltering, TestProcessMessageNoFiltering);
+ FRIEND_TEST(SocketParseMessageTestNoFiltering,
+ TestProcessMessageNoFilteringWithEmptySetExplicitSet);
+ FRIEND_TEST(SocketParseMessageTest, TestProcessMessageFilterEmptySet);
+ FRIEND_TEST(SocketParseMessageTest, TestProcessMessageFilterCompleteSet);
+ FRIEND_TEST(SocketParseMessageTest, TestProcessMessageFilterPartialSet);
+ FRIEND_TEST(SocketParseMessageTest, TestProcessMessageFilterToggle);
};
+
} // namespace statsd
} // namespace os
} // namespace android
diff --git a/statsd/src/state/StateManager.cpp b/statsd/src/state/StateManager.cpp
index c488d04..e75bd97 100644
--- a/statsd/src/state/StateManager.cpp
+++ b/statsd/src/state/StateManager.cpp
@@ -108,6 +108,12 @@
}
}
+void StateManager::addAllAtomIds(LogEventFilter::AtomIdSet& allIds) const {
+ for (const auto& stateTracker : mStateTrackers) {
+ allIds.insert(stateTracker.first);
+ }
+}
+
} // namespace statsd
} // namespace os
} // namespace android
diff --git a/statsd/src/state/StateManager.h b/statsd/src/state/StateManager.h
index 18c404c..68b5c90 100644
--- a/statsd/src/state/StateManager.h
+++ b/statsd/src/state/StateManager.h
@@ -24,6 +24,7 @@
#include "HashableDimensionKey.h"
#include "packages/UidMap.h"
+#include "socket/LogEventFilter.h"
#include "state/StateListener.h"
#include "state/StateTracker.h"
@@ -35,7 +36,7 @@
* This class is NOT thread safe.
* It should only be used while StatsLogProcessor's lock is held.
*/
-class StateManager : public virtual RefBase {
+class StateManager {
public:
StateManager();
@@ -84,6 +85,8 @@
return -1;
}
+ void addAllAtomIds(LogEventFilter::AtomIdSet& allIds) const;
+
private:
mutable std::mutex mMutex;
diff --git a/statsd/src/state/StateTracker.cpp b/statsd/src/state/StateTracker.cpp
index ebba22a..0feba42 100644
--- a/statsd/src/state/StateTracker.cpp
+++ b/statsd/src/state/StateTracker.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define STATSD_DEBUG true // STOPSHIP if true
+#define STATSD_DEBUG false // STOPSHIP if true
#include "Log.h"
#include "stats_util.h"
diff --git a/statsd/src/stats_log.proto b/statsd/src/stats_log.proto
index c64a9be..44add93 100644
--- a/statsd/src/stats_log.proto
+++ b/statsd/src/stats_log.proto
@@ -22,6 +22,7 @@
option java_outer_classname = "StatsLog";
import "frameworks/proto_logging/stats/atoms.proto";
+import "packages/modules/StatsD/statsd/src/guardrail/invalid_config_reason_enum.proto";
message DimensionsValue {
optional int32 field = 1;
@@ -78,6 +79,8 @@
optional int64 start_bucket_elapsed_millis = 5;
optional int64 end_bucket_elapsed_millis = 6;
+
+ optional int64 condition_true_nanos = 7;
}
message CountMetricData {
@@ -106,6 +109,8 @@
optional int64 start_bucket_elapsed_millis = 5;
optional int64 end_bucket_elapsed_millis = 6;
+
+ optional int64 condition_true_nanos = 7;
}
message DurationMetricData {
@@ -141,6 +146,7 @@
int64 value_long = 2;
double value_double = 3;
}
+ optional int32 sample_size = 4;
}
repeated Value values = 9;
@@ -430,7 +436,7 @@
repeated ConfigMetricsReport reports = 2;
- reserved 10;
+ reserved 10 to 13, 101;
}
message StatsdStatsReport {
@@ -438,6 +444,17 @@
optional int32 stats_end_time_sec = 2;
+ message InvalidConfigReason {
+ optional InvalidConfigReasonEnum reason = 1;
+ optional int64 metric_id = 2;
+ optional int64 state_id = 3;
+ optional int64 alert_id = 4;
+ optional int64 alarm_id = 5;
+ optional int64 subscription_id = 6;
+ repeated int64 matcher_id = 7;
+ repeated int64 condition_id = 8;
+ }
+
message MatcherStats {
optional int64 id = 1;
optional int32 matched_times = 2;
@@ -469,6 +486,7 @@
optional int32 matcher_count = 7;
optional int32 alert_count = 8;
optional bool is_valid = 9;
+ optional InvalidConfigReason invalid_config_reason = 24;
repeated int32 broadcast_sent_time_sec = 10;
repeated int32 data_drop_time_sec = 11;
repeated int64 data_drop_bytes = 21;
@@ -486,6 +504,12 @@
repeated Annotation annotation = 18;
repeated int32 activation_time_sec = 22;
repeated int32 deactivation_time_sec = 23;
+ repeated RestrictedMetricStats restricted_metric_stats = 25;
+ optional bool device_info_table_creation_failed = 26;
+ optional int32 restricted_db_corrupted_count = 27;
+ repeated int64 restricted_flush_latency = 28;
+ repeated int64 restricted_db_size_time_sec = 29;
+ repeated int64 restricted_db_size_bytes = 30;
}
repeated ConfigStats config_stats = 3;
@@ -494,6 +518,8 @@
optional int32 tag = 1;
optional int32 count = 2;
optional int32 error_count = 3;
+ optional int32 dropped_count = 4;
+ optional int32 skip_count = 5;
}
repeated AtomStats atom_stats = 7;
@@ -603,6 +629,42 @@
}
repeated ActivationBroadcastGuardrail activation_guardrail_stats = 19;
+
+ message RestrictedMetricStats {
+ optional int64 restricted_metric_id = 1;
+ optional int64 insert_error = 2;
+ optional int64 table_creation_error = 3;
+ optional int64 table_deletion_error = 4;
+ repeated int64 flush_latency_ns = 5;
+ optional int64 category_changed_count = 6;
+ }
+
+ message RestrictedMetricQueryStats {
+ optional int32 calling_uid = 1;
+ optional int64 config_id = 2;
+ optional int32 config_uid = 3;
+ optional string config_package = 4;
+
+ enum InvalidQueryReason {
+ UNKNOWN_REASON = 0;
+ FLAG_DISABLED = 1;
+ UNSUPPORTED_SQLITE_VERSION = 2;
+ AMBIGUOUS_CONFIG_KEY = 3;
+ CONFIG_KEY_NOT_FOUND = 4;
+ CONFIG_KEY_WITH_UNMATCHED_DELEGATE = 5;
+ QUERY_FAILURE = 6;
+ INCONSISTENT_ROW_SIZE = 7;
+ NULL_CALLBACK = 8;
+ }
+
+ optional InvalidQueryReason invalid_query_reason = 5;
+ optional int64 query_wall_time_ns = 6;
+ optional bool has_error = 7;
+ optional string query_error = 8;
+ optional int64 query_latency_ns = 9;
+ }
+ repeated RestrictedMetricQueryStats restricted_metric_query_stats = 20;
+ optional uint32 shard_offset = 21;
}
message AlertTriggerDetails {
diff --git a/statsd/src/stats_policy_config.proto b/statsd/src/stats_policy_config.proto
new file mode 100644
index 0000000..c1ea446
--- /dev/null
+++ b/statsd/src/stats_policy_config.proto
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto2";
+
+package android.os.statsd;
+
+option java_package = "com.android.internal.os";
+option java_outer_classname = "StatsPolicyConfig";
+
+message StatsPolicyConfig {
+ optional int32 minimumClientsInAggregateResult = 1;
+}
\ No newline at end of file
diff --git a/statsd/src/statsd_config.proto b/statsd/src/statsd_config.proto
index be733b4..36ca44f 100644
--- a/statsd/src/statsd_config.proto
+++ b/statsd/src/statsd_config.proto
@@ -78,6 +78,12 @@
StringListMatcher eq_any_string = 13;
StringListMatcher neq_any_string = 14;
+ IntListMatcher eq_any_int = 15;
+ IntListMatcher neq_any_int = 16;
+
+ string eq_wildcard_string = 17;
+ StringListMatcher eq_any_wildcard_string = 18;
+ StringListMatcher neq_any_wildcard_string = 19;
}
}
@@ -89,6 +95,10 @@
repeated string str_value = 1;
}
+message IntListMatcher {
+ repeated int64 int_value = 1;
+}
+
enum LogicalOperation {
LOGICAL_OPERATION_UNSPECIFIED = 0;
AND = 1;
@@ -204,6 +214,12 @@
}
}
+message DimensionalSamplingInfo {
+ optional FieldMatcher sampled_what_field = 1;
+
+ optional int32 shard_count = 2;
+}
+
message EventMetric {
optional int64 id = 1;
@@ -240,6 +256,8 @@
optional FieldMatcher dimensions_in_condition = 7 [deprecated = true];
+ optional DimensionalSamplingInfo dimensional_sampling_info = 12;
+
reserved 100;
reserved 101;
}
@@ -274,6 +292,8 @@
optional FieldMatcher dimensions_in_condition = 8 [deprecated = true];
+ optional DimensionalSamplingInfo dimensional_sampling_info = 13;
+
reserved 100;
reserved 101;
}
@@ -313,6 +333,8 @@
optional bool split_bucket_for_app_upgrade = 14;
+ optional DimensionalSamplingInfo dimensional_sampling_info = 15;
+
reserved 100;
reserved 101;
}
@@ -348,6 +370,8 @@
}
optional AggregationType aggregation_type = 8 [default = SUM];
+ optional bool include_sample_size = 22;
+
optional int64 min_bucket_size_nanos = 10;
optional bool use_absolute_value_on_reset = 11 [default = false];
@@ -372,6 +396,8 @@
optional FieldMatcher dimensions_in_condition = 9 [deprecated = true];
+ optional DimensionalSamplingInfo dimensional_sampling_info = 23;
+
reserved 100;
reserved 101;
}
@@ -399,6 +425,8 @@
repeated MetricStateLink state_link = 11;
+ optional DimensionalSamplingInfo dimensional_sampling_info = 12;
+
reserved 100;
reserved 101;
}
@@ -563,6 +591,8 @@
optional uint32 package_certificate_hash_size_bytes = 26;
+ optional string restricted_metrics_delegate_package_name = 27;
+
// Do not use.
reserved 1000, 1001;
}
diff --git a/statsd/src/statsd_metadata.proto b/statsd/src/statsd_metadata.proto
index 200b392..7d18894 100644
--- a/statsd/src/statsd_metadata.proto
+++ b/statsd/src/statsd_metadata.proto
@@ -56,12 +56,18 @@
repeated AlertDimensionKeyedData alert_dim_keyed_data = 2;
}
+message MetricMetadata {
+ optional int64 metric_id = 1;
+ optional int32 restricted_category = 2;
+}
+
// All metadata for a config in statsd
message StatsMetadata {
optional ConfigKey config_key = 1;
repeated AlertMetadata alert_metadata = 2;
+ repeated MetricMetadata metric_metadata = 3;
}
message StatsMetadataList {
repeated StatsMetadata stats_metadata = 1;
-}
\ No newline at end of file
+}
diff --git a/statsd/src/storage/StorageManager.cpp b/statsd/src/storage/StorageManager.cpp
index b3e8d8a..a0d13f9 100644
--- a/statsd/src/storage/StorageManager.cpp
+++ b/statsd/src/storage/StorageManager.cpp
@@ -17,19 +17,25 @@
#define STATSD_DEBUG false // STOPSHIP if true
#include "Log.h"
-#include "android-base/stringprintf.h"
-#include "guardrail/StatsdStats.h"
#include "storage/StorageManager.h"
-#include "stats_log_util.h"
#include <android-base/file.h>
+#include <android-modules-utils/sdk_level.h>
#include <private/android_filesystem_config.h>
+#include <sys/stat.h>
+
#include <fstream>
+#include "android-base/stringprintf.h"
+#include "guardrail/StatsdStats.h"
+#include "stats_log_util.h"
+#include "utils/DbUtils.h"
+
namespace android {
namespace os {
namespace statsd {
+using android::modules::sdklevel::IsAtLeastU;
using android::util::FIELD_COUNT_REPEATED;
using android::util::FIELD_TYPE_MESSAGE;
using std::map;
@@ -40,11 +46,6 @@
*/
#define STATS_DATA_DIR "/data/misc/stats-data"
#define STATS_SERVICE_DIR "/data/misc/stats-service"
-#define TRAIN_INFO_DIR "/data/misc/train-info"
-#define TRAIN_INFO_PATH "/data/misc/train-info/train-info.bin"
-
-// Magic word at the start of the train info file, change this if changing the file format
-const uint32_t TRAIN_INFO_FILE_MAGIC = 0xfb7447bf;
// for ConfigMetricsReportList
const int FIELD_ID_REPORTS = 2;
@@ -84,7 +85,7 @@
dirent* de;
while ((de = readdir(dir.get()))) {
char* fileName = de->d_name;
- if (fileName[0] == '.') continue;
+ if (fileName[0] == '.' || de->d_type == DT_DIR) continue;
size_t fileNameLength = strlen(fileName);
if (fileNameLength >= trainName.length()) {
@@ -123,6 +124,16 @@
output->mIsHistory = (substr != nullptr && strcmp("history", substr) == 0);
}
+// Parses a sqlite db file name of the form "<uid>_<configId>.db" into a ConfigKey.
+// Returns ConfigKey(-1, -1) if the name cannot be parsed.
+static ConfigKey parseDbName(char* name) {
+ char* uid = strtok(name, "_");
+ char* configId = strtok(nullptr, ".");
+ if (uid == nullptr || configId == nullptr) {
+ return ConfigKey(-1, -1);
+ }
+ return ConfigKey(StrToInt64(uid), StrToInt64(configId));
+}
+
void StorageManager::writeFile(const char* file, const void* buffer, int numBytes) {
int fd = open(file, O_WRONLY | O_CREAT | O_CLOEXEC, S_IRUSR | S_IWUSR);
if (fd == -1) {
@@ -310,6 +321,32 @@
return false;
}
+ // On devices that are upgrading from 32 to 64 bit, reading size_t bytes will result in reading
+ // the wrong amount of data. We try to detect that issue here and read the file correctly.
+ // In the normal case on 64-bit devices, the trainNameSize's upper 4 bytes should be 0, but if
+ // a 32-bit device wrote the file, these would be the first 4 bytes of the train name.
+ bool is32To64BitUpgrade = false;
+ if ((sizeof(size_t) == sizeof(int64_t)) && (trainNameSize & 0xFFFFFFFF00000000) != 0) {
+ is32To64BitUpgrade = true;
+ // Reset the file offset to the magic + version code, and reread from the train name size.
+ off_t seekResult = lseek(fd, sizeof(magic) + sizeof(trainInfo.trainVersionCode), SEEK_SET);
+
+ if (seekResult != sizeof(magic) + sizeof(trainInfo.trainVersionCode)) {
+ VLOG("Failed to reset file offset when reading 32 to 64 bit train info");
+ close(fd);
+ return false;
+ }
+
+ int32_t trainNameSize32Bit;
+ result = read(fd, &trainNameSize32Bit, sizeof(int32_t));
+ if (result != sizeof(int32_t)) {
+ VLOG("Failed to read train name size 32 bit from train info file");
+ close(fd);
+ return false;
+ }
+ trainNameSize = trainNameSize32Bit;
+ }
+
// Read trainName
trainInfo.trainName.resize(trainNameSize);
result = read(fd, trainInfo.trainName.data(), trainNameSize);
@@ -330,11 +367,22 @@
// Read experiment ids count.
size_t experimentIdsCount;
- result = read(fd, &experimentIdsCount, sizeof(size_t));
- if (result != sizeof(size_t)) {
- VLOG("Failed to read train experiment id count from train info file");
- close(fd);
- return false;
+ int32_t experimentIdsCount32Bit;
+ if (is32To64BitUpgrade) {
+ result = read(fd, &experimentIdsCount32Bit, sizeof(int32_t));
+ if (result != sizeof(int32_t)) {
+ VLOG("Failed to read train experiment id count 32 bit from train info file");
+ close(fd);
+ return false;
+ }
+ experimentIdsCount = experimentIdsCount32Bit;
+ } else {
+ result = read(fd, &experimentIdsCount, sizeof(size_t));
+ if (result != sizeof(size_t)) {
+ VLOG("Failed to read train experiment id count from train info file");
+ close(fd);
+ return false;
+ }
}
// Read experimentIds
@@ -398,7 +446,7 @@
dirent* de;
while ((de = readdir(dir.get()))) {
char* name = de->d_name;
- if (name[0] == '.') {
+ if (name[0] == '.' || de->d_type == DT_DIR) {
continue;
}
@@ -430,7 +478,7 @@
dirent* de;
while ((de = readdir(dir.get()))) {
char* name = de->d_name;
- if (name[0] == '.') continue;
+ if (name[0] == '.' || de->d_type == DT_DIR) continue;
deleteFile(StringPrintf("%s/%s", path, name).c_str());
}
}
@@ -445,7 +493,7 @@
dirent* de;
while ((de = readdir(dir.get()))) {
char* name = de->d_name;
- if (name[0] == '.') {
+ if (name[0] == '.' || de->d_type == DT_DIR) {
continue;
}
size_t nameLen = strlen(name);
@@ -467,7 +515,7 @@
dirent* de;
while ((de = readdir(dir.get()))) {
char* name = de->d_name;
- if (name[0] == '.') continue;
+ if (name[0] == '.' || de->d_type == DT_DIR) continue;
VLOG("file %s", name);
FileName output;
@@ -489,7 +537,7 @@
dirent* de;
while ((de = readdir(dir.get()))) {
char* name = de->d_name;
- if (name[0] == '.') continue;
+ if (name[0] == '.' || de->d_type == DT_DIR) continue;
size_t nameLen = strlen(name);
size_t suffixLen = suffix.length();
@@ -517,7 +565,7 @@
while ((de = readdir(dir.get()))) {
char* name = de->d_name;
string fileName(name);
- if (name[0] == '.') continue;
+ if (name[0] == '.' || de->d_type == DT_DIR) continue;
FileName output;
parseFileName(name, &output);
@@ -578,7 +626,7 @@
dirent* de;
while ((de = readdir(dir.get()))) {
char* name = de->d_name;
- if (name[0] == '.') continue;
+ if (name[0] == '.' || de->d_type == DT_DIR) continue;
FileName output;
parseFileName(name, &output);
@@ -618,7 +666,7 @@
dirent* de;
while ((de = readdir(dir.get()))) {
char* name = de->d_name;
- if (name[0] == '.') {
+ if (name[0] == '.' || de->d_type == DT_DIR) {
continue;
}
size_t nameLen = strlen(name);
@@ -686,7 +734,7 @@
auto nowSec = getWallClockSec();
while ((de = readdir(dir.get()))) {
char* name = de->d_name;
- if (name[0] == '.') continue;
+ if (name[0] == '.' || de->d_type == DT_DIR) continue;
FileName output;
string file_name;
@@ -750,7 +798,7 @@
int totalFileSize = 0;
while ((de = readdir(dir.get()))) {
char* name = de->d_name;
- if (name[0] == '.') {
+ if (name[0] == '.' || de->d_type == DT_DIR) {
continue;
}
FileName output;
@@ -775,6 +823,49 @@
totalFileSize);
}
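+// Enforces the age and size guardrails on restricted-metric sqlite dbs under the given path:
+// files that are too old, too large, or whose names cannot be parsed are deleted, and the
+// remaining dbs are integrity-checked. No-op on releases before Android U.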
+void StorageManager::enforceDbGuardrails(const char* path, const int64_t currWallClockSec,
+ const int64_t maxBytes) {
+ if (!IsAtLeastU()) {
+ return;
+ }
+ unique_ptr<DIR, decltype(&closedir)> dir(opendir(path), closedir);
+ if (dir == NULL) {
+ VLOG("Path %s does not exist", path);
+ return;
+ }
+
+ dirent* de;
+ int64_t deleteThresholdSec = currWallClockSec - StatsdStats::kMaxAgeSecond;
+ while ((de = readdir(dir.get()))) {
+ char* name = de->d_name;
+ if (name[0] == '.' || de->d_type == DT_DIR) continue;
+ string fullPathName = StringPrintf("%s/%s", path, name);
+ struct stat fileInfo;
+ const ConfigKey key = parseDbName(name);
+ if (stat(fullPathName.c_str(), &fileInfo) != 0) {
+ // Remove file if stat fails.
+ remove(fullPathName.c_str());
+ continue;
+ }
+ StatsdStats::getInstance().noteRestrictedConfigDbSize(key, currWallClockSec,
+ fileInfo.st_size);
+ if (fileInfo.st_mtime <= deleteThresholdSec || fileInfo.st_size >= maxBytes) {
+ remove(fullPathName.c_str());
+ }
+ if (hasFile(dbutils::getDbName(key).c_str())) {
+ dbutils::verifyIntegrityAndDeleteIfNecessary(key);
+ } else {
+ // Remove file if the file name fails to parse.
+ remove(fullPathName.c_str());
+ }
+ }
+}
+
+bool StorageManager::hasFile(const char* file) {
+ struct stat fileInfo;
+ return stat(file, &fileInfo) == 0;
+}
+
} // namespace statsd
} // namespace os
} // namespace android
diff --git a/statsd/src/storage/StorageManager.h b/statsd/src/storage/StorageManager.h
index d59046d..9a25bf0 100644
--- a/statsd/src/storage/StorageManager.h
+++ b/statsd/src/storage/StorageManager.h
@@ -29,6 +29,16 @@
using android::util::ProtoOutputStream;
+/**
+ * NOTE: these directories are protected by SELinux, any changes here must also update
+ * the SELinux policies.
+ */
+#define TRAIN_INFO_DIR "/data/misc/train-info"
+#define TRAIN_INFO_PATH "/data/misc/train-info/train-info.bin"
+
+// Magic word at the start of the train info file, change this if changing the file format
+const uint32_t TRAIN_INFO_FILE_MAGIC = 0xfb7447bf;
+
class StorageManager : public virtual RefBase {
public:
struct FileInfo {
@@ -152,6 +162,11 @@
static void sortFiles(vector<FileInfo>* fileNames);
+ static void enforceDbGuardrails(const char* path, const int64_t wallClockSec,
+ const int64_t maxBytes);
+
+ static bool hasFile(const char* file);
+
private:
/**
* Prints disk usage statistics about a directory related to statsd.
diff --git a/statsd/src/utils/DbUtils.cpp b/statsd/src/utils/DbUtils.cpp
new file mode 100644
index 0000000..35ad150
--- /dev/null
+++ b/statsd/src/utils/DbUtils.cpp
@@ -0,0 +1,502 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define STATSD_DEBUG false // STOPSHIP if true
+
+#include "Log.h"
+
+#include "utils/DbUtils.h"
+
+#include <android/api-level.h>
+
+#include "FieldValue.h"
+#include "android-base/properties.h"
+#include "android-base/stringprintf.h"
+#include "stats_log_util.h"
+#include "storage/StorageManager.h"
+
+namespace android {
+namespace os {
+namespace statsd {
+namespace dbutils {
+
+using ::android::os::statsd::FLOAT;
+using ::android::os::statsd::INT;
+using ::android::os::statsd::LONG;
+using ::android::os::statsd::StorageManager;
+using ::android::os::statsd::STRING;
+using base::GetProperty;
+using base::StringPrintf;
+
+const string TABLE_NAME_PREFIX = "metric_";
+const string COLUMN_NAME_ATOM_TAG = "atomId";
+const string COLUMN_NAME_EVENT_ELAPSED_CLOCK_NS = "elapsedTimestampNs";
+const string COLUMN_NAME_EVENT_WALL_CLOCK_NS = "wallTimestampNs";
+
+const string COLUMN_NAME_SDK_VERSION = "sdkVersion";
+const string COLUMN_NAME_MODEL = "model";
+const string COLUMN_NAME_PRODUCT = "product";
+const string COLUMN_NAME_HARDWARE = "hardware";
+const string COLUMN_NAME_DEVICE = "device";
+const string COLUMN_NAME_BUILD = "osBuild";
+const string COLUMN_NAME_FINGERPRINT = "fingerprint";
+const string COLUMN_NAME_BRAND = "brand";
+const string COLUMN_NAME_MANUFACTURER = "manufacturer";
+const string COLUMN_NAME_BOARD = "board";
+
+static std::vector<std::string> getExpectedTableSchema(const LogEvent& logEvent) {
+ vector<std::string> result;
+ for (const FieldValue& fieldValue : logEvent.getValues()) {
+ if (fieldValue.mField.getDepth() > 0) {
+ // Repeated fields are not supported.
+ continue;
+ }
+ switch (fieldValue.mValue.getType()) {
+ case INT:
+ case LONG:
+ result.push_back("INTEGER");
+ break;
+ case STRING:
+ result.push_back("TEXT");
+ break;
+ case FLOAT:
+ result.push_back("REAL");
+ break;
+ default:
+ // Byte array fields are not supported.
+ break;
+ }
+ }
+ return result;
+}
+
+static int integrityCheckCallback(void*, int colCount, char** queryResults, char**) {
+ if (colCount == 0 || strcmp(queryResults[0], "ok") != 0) {
+ // Returning 1 is an error code that causes exec to stop and error.
+ return 1;
+ }
+ return 0;
+}
+
+string getDbName(const ConfigKey& key) {
+ return StringPrintf("%s/%d_%lld.db", STATS_RESTRICTED_DATA_DIR, key.GetUid(),
+ (long long)key.GetId());
+}
+
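+// Builds a statement of the form:
+//   CREATE TABLE IF NOT EXISTS metric_<id>(atomId INTEGER,elapsedTimestampNs INTEGER,
+//       wallTimestampNs INTEGER,field_<pos> <TYPE>,...) STRICT;
+// Repeated and byte-array fields are skipped.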
+static string getCreateSqlString(const int64_t metricId, const LogEvent& event) {
+ string result = StringPrintf("CREATE TABLE IF NOT EXISTS %s%s", TABLE_NAME_PREFIX.c_str(),
+ reformatMetricId(metricId).c_str());
+ result += StringPrintf("(%s INTEGER,%s INTEGER,%s INTEGER,", COLUMN_NAME_ATOM_TAG.c_str(),
+ COLUMN_NAME_EVENT_ELAPSED_CLOCK_NS.c_str(),
+ COLUMN_NAME_EVENT_WALL_CLOCK_NS.c_str());
+ for (size_t fieldId = 1; fieldId <= event.getValues().size(); ++fieldId) {
+ const FieldValue& fieldValue = event.getValues()[fieldId - 1];
+ if (fieldValue.mField.getDepth() > 0) {
+ // Repeated fields are not supported.
+ continue;
+ }
+ switch (fieldValue.mValue.getType()) {
+ case INT:
+ case LONG:
+ result += StringPrintf("field_%d INTEGER,", fieldValue.mField.getPosAtDepth(0));
+ break;
+ case STRING:
+ result += StringPrintf("field_%d TEXT,", fieldValue.mField.getPosAtDepth(0));
+ break;
+ case FLOAT:
+ result += StringPrintf("field_%d REAL,", fieldValue.mField.getPosAtDepth(0));
+ break;
+ default:
+ // Byte array fields are not supported.
+ break;
+ }
+ }
+ result.pop_back();
+ result += ") STRICT;";
+ return result;
+}
+
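+// Renders negative metric ids as "n<absolute value>" so the id can be embedded in a table name.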
+string reformatMetricId(const int64_t metricId) {
+ return metricId < 0 ? StringPrintf("n%lld", (long long)metricId * -1)
+ : StringPrintf("%lld", (long long)metricId);
+}
+
+bool createTableIfNeeded(const ConfigKey& key, const int64_t metricId, const LogEvent& event) {
+ const string dbName = getDbName(key);
+ sqlite3* db;
+ if (sqlite3_open(dbName.c_str(), &db) != SQLITE_OK) {
+ sqlite3_close(db);
+ return false;
+ }
+
+ char* error = nullptr;
+ string zSql = getCreateSqlString(metricId, event);
+ sqlite3_exec(db, zSql.c_str(), nullptr, nullptr, &error);
+ sqlite3_close(db);
+ if (error) {
+ ALOGW("Failed to create table to db: %s", error);
+ return false;
+ }
+ return true;
+}
+
+bool isEventCompatible(const ConfigKey& key, const int64_t metricId, const LogEvent& event) {
+ const string dbName = getDbName(key);
+ sqlite3* db;
+ if (sqlite3_open(dbName.c_str(), &db) != SQLITE_OK) {
+ sqlite3_close(db);
+ return false;
+ }
+ string zSql = StringPrintf("PRAGMA table_info(metric_%s);", reformatMetricId(metricId).c_str());
+ string err;
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ if (!query(key, zSql, rows, columnTypes, columnNames, err)) {
+ ALOGE("Failed to check table schema for metric %lld: %s", (long long)metricId, err.c_str());
+ sqlite3_close(db);
+ return false;
+ }
+ // Sample query result
+ // cid name type notnull dflt_value pk
+ // --- ----------------- ------- ------- ---------- --
+ // 0 atomId INTEGER 0 (null) 0
+ // 1 elapsedTimestampNs INTEGER 0 (null) 0
+ // 2 wallTimestampNs INTEGER 0 (null) 0
+ // 3 field_1 INTEGER 0 (null) 0
+ // 4 field_2 TEXT 0 (null) 0
+ std::vector<string> tableSchema;
+ // Atom fields start at row index 3, after the atomId, elapsedTimestampNs and
+ // wallTimestampNs rows; the third column of each row stores the column's data type.
+ for (size_t i = 3; i < rows.size(); ++i) {
+ tableSchema.push_back(rows[i][2]);
+ }
+ sqlite3_close(db);
+ // An empty rows vector implies the table has not yet been created.
+ return rows.size() == 0 || getExpectedTableSchema(event) == tableSchema;
+}
+
+bool deleteTable(const ConfigKey& key, const int64_t metricId) {
+ const string dbName = getDbName(key);
+ sqlite3* db;
+ if (sqlite3_open(dbName.c_str(), &db) != SQLITE_OK) {
+ sqlite3_close(db);
+ return false;
+ }
+ string zSql = StringPrintf("DROP TABLE metric_%s", reformatMetricId(metricId).c_str());
+ char* error = nullptr;
+ sqlite3_exec(db, zSql.c_str(), nullptr, nullptr, &error);
+ sqlite3_close(db);
+ if (error) {
+ ALOGW("Failed to drop table from db: %s", error);
+ return false;
+ }
+ return true;
+}
+
+void deleteDb(const ConfigKey& key) {
+ const string dbName = getDbName(key);
+ StorageManager::deleteFile(dbName.c_str());
+}
+
+sqlite3* getDb(const ConfigKey& key) {
+ const string dbName = getDbName(key);
+ sqlite3* db;
+ if (sqlite3_open(dbName.c_str(), &db) == SQLITE_OK) {
+ return db;
+ }
+ return nullptr;
+}
+
+void closeDb(sqlite3* db) {
+ sqlite3_close(db);
+}
+
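+// Builds a multi-row prepared statement of the form
+//   INSERT INTO metric_<id> VALUES(<tag>, <elapsedNs>, <wallNs>, ?, ...),(...);
+// and binds each supported field value to its corresponding ? placeholder.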
+static bool getInsertSqlStmt(sqlite3* db, sqlite3_stmt** stmt, const int64_t metricId,
+ const vector<LogEvent>& events, string& err) {
+ string result =
+ StringPrintf("INSERT INTO metric_%s VALUES", reformatMetricId(metricId).c_str());
+ for (auto& logEvent : events) {
+ result += StringPrintf("(%d, %lld, %lld,", logEvent.GetTagId(),
+ (long long)logEvent.GetElapsedTimestampNs(),
+ (long long)logEvent.GetLogdTimestampNs());
+ for (auto& fieldValue : logEvent.getValues()) {
+ if (fieldValue.mField.getDepth() > 0 || fieldValue.mValue.getType() == STORAGE) {
+ // Repeated fields and byte fields are not supported.
+ continue;
+ }
+ result += "?,";
+ }
+ result.pop_back();
+ result += "),";
+ }
+ result.pop_back();
+ result += ";";
+ if (sqlite3_prepare_v2(db, result.c_str(), -1, stmt, nullptr) != SQLITE_OK) {
+ err = sqlite3_errmsg(db);
+ return false;
+ }
+ // ? parameters start with an index of 1 from start of query string to the
+ // end.
+ int32_t index = 1;
+ for (auto& logEvent : events) {
+ for (auto& fieldValue : logEvent.getValues()) {
+ if (fieldValue.mField.getDepth() > 0 || fieldValue.mValue.getType() == STORAGE) {
+ // Repeated fields and byte fields are not supported.
+ continue;
+ }
+ switch (fieldValue.mValue.getType()) {
+ case INT:
+ sqlite3_bind_int(*stmt, index, fieldValue.mValue.int_value);
+ break;
+ case LONG:
+ sqlite3_bind_int64(*stmt, index, fieldValue.mValue.long_value);
+ break;
+ case STRING:
+ sqlite3_bind_text(*stmt, index, fieldValue.mValue.str_value.c_str(), -1,
+ SQLITE_STATIC);
+ break;
+ case FLOAT:
+ sqlite3_bind_double(*stmt, index, fieldValue.mValue.float_value);
+ break;
+ default:
+ // Byte array fields are not supported.
+ break;
+ }
+ ++index;
+ }
+ }
+ return true;
+}
+
+bool insert(const ConfigKey& key, const int64_t metricId, const vector<LogEvent>& events,
+ string& error) {
+ const string dbName = getDbName(key);
+ sqlite3* db;
+ if (sqlite3_open(dbName.c_str(), &db) != SQLITE_OK) {
+ error = sqlite3_errmsg(db);
+ sqlite3_close(db);
+ return false;
+ }
+ bool success = insert(db, metricId, events, error);
+ sqlite3_close(db);
+ return success;
+}
+
+bool insert(sqlite3* db, const int64_t metricId, const vector<LogEvent>& events, string& error) {
+ sqlite3_stmt* stmt = nullptr;
+ if (!getInsertSqlStmt(db, &stmt, metricId, events, error)) {
+ ALOGW("Failed to generate prepared sql insert query %s", error.c_str());
+ sqlite3_finalize(stmt);
+ return false;
+ }
+ if (sqlite3_step(stmt) != SQLITE_DONE) {
+ error = sqlite3_errmsg(db);
+ ALOGW("Failed to insert data to db: %s", error.c_str());
+ sqlite3_finalize(stmt);
+ return false;
+ }
+ sqlite3_finalize(stmt);
+ return true;
+}
+
+bool query(const ConfigKey& key, const string& zSql, vector<vector<string>>& rows,
+ vector<int32_t>& columnTypes, vector<string>& columnNames, string& err) {
+ const string dbName = getDbName(key);
+ sqlite3* db;
+ if (sqlite3_open_v2(dbName.c_str(), &db, SQLITE_OPEN_READONLY, nullptr) != SQLITE_OK) {
+ err = sqlite3_errmsg(db);
+ sqlite3_close(db);
+ return false;
+ }
+ sqlite3_stmt* stmt;
+ if (sqlite3_prepare_v2(db, zSql.c_str(), -1, &stmt, nullptr) != SQLITE_OK) {
+ err = sqlite3_errmsg(db);
+ sqlite3_finalize(stmt);
+ sqlite3_close(db);
+ return false;
+ }
+ int result = sqlite3_step(stmt);
+ bool firstIter = true;
+ while (result == SQLITE_ROW) {
+ int colCount = sqlite3_column_count(stmt);
+ vector<string> rowData(colCount);
+ for (int i = 0; i < colCount; ++i) {
+ if (firstIter) {
+ int32_t columnType = sqlite3_column_type(stmt, i);
+                // Convert to Java-compatible cursor types; see AbstractCursor#getType().
+                if (columnType == 5) {
+                    columnType = 0;  // Remap SQLITE_NULL (5) to Cursor.FIELD_TYPE_NULL (0).
+                }
+ columnTypes.push_back(columnType);
+ columnNames.push_back(reinterpret_cast<const char*>(sqlite3_column_name(stmt, i)));
+ }
+ const unsigned char* textResult = sqlite3_column_text(stmt, i);
+ string colData =
+ textResult != nullptr ? string(reinterpret_cast<const char*>(textResult)) : "";
+ rowData[i] = std::move(colData);
+ }
+ rows.push_back(std::move(rowData));
+ firstIter = false;
+ result = sqlite3_step(stmt);
+ }
+ sqlite3_finalize(stmt);
+ if (result != SQLITE_DONE) {
+ err = sqlite3_errmsg(db);
+ sqlite3_close(db);
+ return false;
+ }
+ sqlite3_close(db);
+ return true;
+}
+
+bool flushTtl(sqlite3* db, const int64_t metricId, const int64_t ttlWallClockNs) {
+ string zSql = StringPrintf("DELETE FROM %s%s WHERE %s <= %lld", TABLE_NAME_PREFIX.c_str(),
+ reformatMetricId(metricId).c_str(),
+ COLUMN_NAME_EVENT_WALL_CLOCK_NS.c_str(), (long long)ttlWallClockNs);
+
+ char* error = nullptr;
+ sqlite3_exec(db, zSql.c_str(), nullptr, nullptr, &error);
+ if (error) {
+ ALOGW("Failed to enforce ttl: %s", error);
+ return false;
+ }
+ return true;
+}
+
+void verifyIntegrityAndDeleteIfNecessary(const ConfigKey& configKey) {
+ const string dbName = getDbName(configKey);
+ sqlite3* db;
+ if (sqlite3_open(dbName.c_str(), &db) != SQLITE_OK) {
+ sqlite3_close(db);
+ return;
+ }
+ string zSql = "PRAGMA integrity_check";
+
+ char* error = nullptr;
+ sqlite3_exec(db, zSql.c_str(), integrityCheckCallback, nullptr, &error);
+ if (error) {
+ StatsdStats::getInstance().noteDbCorrupted(configKey);
+ ALOGW("Integrity Check failed %s", error);
+ sqlite3_close(db);
+ deleteDb(configKey);
+ return;
+ }
+ sqlite3_close(db);
+}
+
+static bool getDeviceInfoInsertStmt(sqlite3* db, sqlite3_stmt** stmt, string& error) {
+ string insertSql = StringPrintf("INSERT INTO device_info VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)");
+ if (sqlite3_prepare_v2(db, insertSql.c_str(), -1, stmt, nullptr) != SQLITE_OK) {
+ error = sqlite3_errmsg(db);
+ return false;
+ }
+
+    // "?" parameters are indexed starting at 1, in order of appearance in the query string.
+ int32_t index = 1;
+
+ int32_t sdkVersion = android_get_device_api_level();
+ sqlite3_bind_int(*stmt, index, sdkVersion);
+ ++index;
+
+ string model = GetProperty("ro.product.model", "(unknown)");
+ sqlite3_bind_text(*stmt, index, model.c_str(), -1, SQLITE_TRANSIENT);
+ ++index;
+
+ string product = GetProperty("ro.product.name", "(unknown)");
+ sqlite3_bind_text(*stmt, index, product.c_str(), -1, SQLITE_TRANSIENT);
+ ++index;
+
+ string hardware = GetProperty("ro.hardware", "(unknown)");
+ sqlite3_bind_text(*stmt, index, hardware.c_str(), -1, SQLITE_TRANSIENT);
+ ++index;
+
+ string device = GetProperty("ro.product.device", "(unknown)");
+ sqlite3_bind_text(*stmt, index, device.c_str(), -1, SQLITE_TRANSIENT);
+ ++index;
+
+ string osBuild = GetProperty("ro.build.id", "(unknown)");
+ sqlite3_bind_text(*stmt, index, osBuild.c_str(), -1, SQLITE_TRANSIENT);
+ ++index;
+
+ string fingerprint = GetProperty("ro.build.fingerprint", "(unknown)");
+ sqlite3_bind_text(*stmt, index, fingerprint.c_str(), -1, SQLITE_TRANSIENT);
+ ++index;
+
+ string brand = GetProperty("ro.product.brand", "(unknown)");
+ sqlite3_bind_text(*stmt, index, brand.c_str(), -1, SQLITE_TRANSIENT);
+ ++index;
+
+ string manufacturer = GetProperty("ro.product.manufacturer", "(unknown)");
+ sqlite3_bind_text(*stmt, index, manufacturer.c_str(), -1, SQLITE_TRANSIENT);
+ ++index;
+
+ string board = GetProperty("ro.product.board", "(unknown)");
+ sqlite3_bind_text(*stmt, index, board.c_str(), -1, SQLITE_TRANSIENT);
+ ++index;
+
+ return true;
+}
+
+bool updateDeviceInfoTable(const ConfigKey& key, string& error) {
+ const string dbName = getDbName(key);
+ sqlite3* db;
+ if (sqlite3_open(dbName.c_str(), &db) != SQLITE_OK) {
+ error = sqlite3_errmsg(db);
+ sqlite3_close(db);
+ return false;
+ }
+
+ string dropTableSql = "DROP TABLE device_info";
+ // Ignore possible error result code if table has not yet been created.
+ sqlite3_exec(db, dropTableSql.c_str(), nullptr, nullptr, nullptr);
+
+ string createTableSql = StringPrintf(
+ "CREATE TABLE device_info(%s INTEGER, %s TEXT, %s TEXT, %s TEXT, %s TEXT, %s TEXT, %s "
+ "TEXT, %s TEXT, %s TEXT, %s TEXT) "
+ "STRICT",
+ COLUMN_NAME_SDK_VERSION.c_str(), COLUMN_NAME_MODEL.c_str(), COLUMN_NAME_PRODUCT.c_str(),
+ COLUMN_NAME_HARDWARE.c_str(), COLUMN_NAME_DEVICE.c_str(), COLUMN_NAME_BUILD.c_str(),
+ COLUMN_NAME_FINGERPRINT.c_str(), COLUMN_NAME_BRAND.c_str(),
+ COLUMN_NAME_MANUFACTURER.c_str(), COLUMN_NAME_BOARD.c_str());
+ if (sqlite3_exec(db, createTableSql.c_str(), nullptr, nullptr, nullptr) != SQLITE_OK) {
+ error = sqlite3_errmsg(db);
+ ALOGW("Failed to create device info table %s", error.c_str());
+ sqlite3_close(db);
+ return false;
+ }
+
+ sqlite3_stmt* stmt = nullptr;
+ if (!getDeviceInfoInsertStmt(db, &stmt, error)) {
+ ALOGW("Failed to generate device info prepared sql insert query %s", error.c_str());
+ sqlite3_finalize(stmt);
+ sqlite3_close(db);
+ return false;
+ }
+
+ if (sqlite3_step(stmt) != SQLITE_DONE) {
+ error = sqlite3_errmsg(db);
+ ALOGW("Failed to insert data to device info table: %s", error.c_str());
+ sqlite3_finalize(stmt);
+ sqlite3_close(db);
+ return false;
+ }
+ sqlite3_finalize(stmt);
+ sqlite3_close(db);
+ return true;
+}
+} // namespace dbutils
+} // namespace statsd
+} // namespace os
+} // namespace android
diff --git a/statsd/src/utils/DbUtils.h b/statsd/src/utils/DbUtils.h
new file mode 100644
index 0000000..8a561e6
--- /dev/null
+++ b/statsd/src/utils/DbUtils.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <sqlite3.h>
+
+#include "config/ConfigKey.h"
+#include "logd/LogEvent.h"
+
+using std::string;
+using std::vector;
+
+namespace android {
+namespace os {
+namespace statsd {
+namespace dbutils {
+
+#define STATS_RESTRICTED_DATA_DIR "/data/misc/stats-data/restricted-data"
+
+inline int32_t getDbVersion() {
+ return SQLITE_VERSION_NUMBER;
+};
+
+string getDbName(const ConfigKey& key);
+
+string reformatMetricId(const int64_t metricId);
+
+/* Creates a new data table for a specified metric if one does not yet exist. */
+bool createTableIfNeeded(const ConfigKey& key, const int64_t metricId, const LogEvent& event);
+
+/* Checks whether the table schema for the given metric matches the event.
+ * Also returns true if the table has not yet been created.
+ */
+bool isEventCompatible(const ConfigKey& key, const int64_t metricId, const LogEvent& event);
+
+/* Deletes a data table for the specified metric. */
+bool deleteTable(const ConfigKey& key, const int64_t metricId);
+
+/* Deletes the SQLite db data file. */
+void deleteDb(const ConfigKey& key);
+
+/* Gets a handle to the sqlite db. You must call closeDb to free the allocated memory.
+ * Returns a nullptr if an error occurs.
+ */
+sqlite3* getDb(const ConfigKey& key);
+
+/* Closes the handle to the sqlite db. */
+void closeDb(sqlite3* db);
+
+/* Inserts new data into the specified metric data table.
+ * A temporary sqlite handle is created from the ConfigKey and closed before returning.
+ */
+bool insert(const ConfigKey& key, const int64_t metricId, const vector<LogEvent>& events,
+ string& error);
+
+/* Inserts new data into the specified sqlite db handle. */
+bool insert(sqlite3* db, const int64_t metricId, const vector<LogEvent>& events, string& error);
+
+/* Executes a sql query against the db for the specified ConfigKey.
+ * A temporary sqlite handle is created from the ConfigKey and closed before returning.
+ */
+bool query(const ConfigKey& key, const string& zSql, vector<vector<string>>& rows,
+ vector<int32_t>& columnTypes, vector<string>& columnNames, string& err);
+
+bool flushTtl(sqlite3* db, const int64_t metricId, const int64_t ttlWallClockNs);
+
+/* Checks for database corruption and deletes the db if it is corrupted. */
+void verifyIntegrityAndDeleteIfNecessary(const ConfigKey& key);
+
+/* Creates and updates the device info table for the given configKey. */
+bool updateDeviceInfoTable(const ConfigKey& key, string& error);
+
+} // namespace dbutils
+} // namespace statsd
+} // namespace os
+} // namespace android
\ No newline at end of file
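For orientation, the following is a minimal usage sketch of the dbutils API declared above. It is illustrative only and not part of the patch: the ConfigKey and the already-parsed LogEvent vector are assumed to come from the caller, and a real metric id would come from the config.

    // Illustrative sketch of the dbutils API (not part of this change).
    // Assumes headers for DbUtils.h, <string>, and <vector> are available.
    bool storeAndReadBack(const android::os::statsd::ConfigKey& key, const int64_t metricId,
                          const std::vector<android::os::statsd::LogEvent>& events) {
        using namespace android::os::statsd;
        if (events.empty()) {
            return false;
        }
        // Lazily create the per-metric table based on the first event's schema.
        if (!dbutils::createTableIfNeeded(key, metricId, events[0])) {
            return false;
        }
        std::string error;
        if (!dbutils::insert(key, metricId, events, error)) {
            return false;  // "error" holds the sqlite error message.
        }
        // query() returns every value as a string, with column names and types reported separately.
        std::vector<std::vector<std::string>> rows;
        std::vector<int32_t> columnTypes;
        std::vector<std::string> columnNames;
        std::string err;
        const std::string sql = "SELECT * FROM metric_" + dbutils::reformatMetricId(metricId);
        return dbutils::query(key, sql, rows, columnTypes, columnNames, err);
    }
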
diff --git a/statsd/src/utils/RestrictedPolicyManager.cpp b/statsd/src/utils/RestrictedPolicyManager.cpp
new file mode 100644
index 0000000..3ae75a4
--- /dev/null
+++ b/statsd/src/utils/RestrictedPolicyManager.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "utils/RestrictedPolicyManager.h"
+
+#include "stats_annotations.h"
+
+#define STATSD_DEBUG false // STOPSHIP if true
+
+namespace android {
+namespace os {
+namespace statsd {
+
+RestrictedPolicyManager& RestrictedPolicyManager::getInstance() {
+ static RestrictedPolicyManager policyInstance;
+ return policyInstance;
+}
+
+int32_t RestrictedPolicyManager::getRestrictedCategoryTtl(
+ const StatsdRestrictionCategory categoryId) {
+ return mRestrictionCategoryTtlInDaysMap.find(categoryId) !=
+ mRestrictionCategoryTtlInDaysMap.end()
+ ? mRestrictionCategoryTtlInDaysMap.at(categoryId)
+ : DEFAULT_RESTRICTED_CATEGORY_TTL_DAYS;
+}
+
+} // namespace statsd
+} // namespace os
+} // namespace android
\ No newline at end of file
diff --git a/statsd/src/utils/RestrictedPolicyManager.h b/statsd/src/utils/RestrictedPolicyManager.h
new file mode 100644
index 0000000..34d1fa3
--- /dev/null
+++ b/statsd/src/utils/RestrictedPolicyManager.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <stdlib.h>
+
+#include <map>
+
+#include "stats_annotations.h"
+
+using std::map;
+
+namespace android {
+namespace os {
+namespace statsd {
+
+#define DEFAULT_RESTRICTED_CATEGORY_TTL_DAYS 7
+
+// Restricted categories used internally by statsd.
+enum StatsdRestrictionCategory : int32_t {
+ CATEGORY_UNKNOWN = -1,
+ CATEGORY_NO_RESTRICTION = 0,
+ CATEGORY_DIAGNOSTIC = ASTATSLOG_RESTRICTION_CATEGORY_DIAGNOSTIC,
+ CATEGORY_SYSTEM_INTELLIGENCE = ASTATSLOG_RESTRICTION_CATEGORY_SYSTEM_INTELLIGENCE,
+ CATEGORY_AUTHENTICATION = ASTATSLOG_RESTRICTION_CATEGORY_AUTHENTICATION,
+ CATEGORY_FRAUD_AND_ABUSE = ASTATSLOG_RESTRICTION_CATEGORY_FRAUD_AND_ABUSE,
+};
+
+// Single instance shared across the process.
+class RestrictedPolicyManager {
+public:
+ static RestrictedPolicyManager& getInstance();
+ ~RestrictedPolicyManager(){};
+
+ // Gets the TTL in days for a particular restricted category. Returns the default for unknown
+ // categories.
+ int32_t getRestrictedCategoryTtl(const StatsdRestrictionCategory categoryId);
+
+private:
+ std::map<StatsdRestrictionCategory, int32_t> mRestrictionCategoryTtlInDaysMap;
+};
+} // namespace statsd
+} // namespace os
+} // namespace android
diff --git a/statsd/src/utils/ShardOffsetProvider.cpp b/statsd/src/utils/ShardOffsetProvider.cpp
new file mode 100644
index 0000000..4a0df97
--- /dev/null
+++ b/statsd/src/utils/ShardOffsetProvider.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ShardOffsetProvider.h"
+
+#include <errno.h>
+#include <sys/random.h>
+#include <unistd.h>
+
+#include <chrono>
+
+namespace android {
+namespace os {
+namespace statsd {
+
+ShardOffsetProvider::ShardOffsetProvider() {
+ unsigned int seed = 0;
+    // getrandom() fills the seed with bytes from the urandom source. If it is
+    // unable to read from the urandom source, it returns -1 and we fall back
+    // to time(nullptr) as the seed.
+ if (TEMP_FAILURE_RETRY(
+ getrandom(static_cast<void*>(&seed), sizeof(unsigned int), GRND_NONBLOCK)) < 0) {
+ seed = time(nullptr);
+ }
+ srand(seed);
+ mShardOffset = rand();
+}
+
+ShardOffsetProvider& ShardOffsetProvider::getInstance() {
+ static ShardOffsetProvider sShardOffsetProvider;
+ return sShardOffsetProvider;
+}
+
+} // namespace statsd
+} // namespace os
+} // namespace android
diff --git a/statsd/src/utils/ShardOffsetProvider.h b/statsd/src/utils/ShardOffsetProvider.h
new file mode 100644
index 0000000..dd6a5a7
--- /dev/null
+++ b/statsd/src/utils/ShardOffsetProvider.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SHARD_OFFSET_PROVIDER_H
+#define SHARD_OFFSET_PROVIDER_H
+
+#include <gtest/gtest_prod.h>
+#include <stdlib.h>
+
+namespace android {
+namespace os {
+namespace statsd {
+
+/*
+ * This class is not guarded by any mutex. As currently written it is thread-safe,
+ * since the shard offset is only assigned during construction (and by the test-only setter).
+ * Thread safety must be reconsidered for any future change to this class.
+ */
+class ShardOffsetProvider final {
+public:
+ ~ShardOffsetProvider(){};
+
+ uint32_t getShardOffset() const {
+ return mShardOffset;
+ }
+
+ static ShardOffsetProvider& getInstance();
+
+private:
+ ShardOffsetProvider();
+
+ // Only used for testing.
+ void setShardOffset(const uint32_t shardOffset) {
+ mShardOffset = shardOffset;
+ }
+
+ uint32_t mShardOffset;
+
+ FRIEND_TEST(CountMetricE2eTest, TestDimensionalSampling);
+ FRIEND_TEST(DurationMetricE2eTest, TestDimensionalSampling);
+ FRIEND_TEST(GaugeMetricE2ePushedTest, TestDimensionalSampling);
+ FRIEND_TEST(GaugeMetricProducerTest, TestPullDimensionalSampling);
+ FRIEND_TEST(KllMetricE2eTest, TestDimensionalSampling);
+ FRIEND_TEST(NumericValueMetricProducerTest, TestDimensionalSampling);
+ FRIEND_TEST(StatsdStatsTest, TestShardOffsetProvider);
+};
+
+} // namespace statsd
+} // namespace os
+} // namespace android
+#endif  // SHARD_OFFSET_PROVIDER_H
\ No newline at end of file
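ShardOffsetProvider supplies a per-process random offset that dimensional-sampling code (e.g. shouldKeepSample, exercised in FieldValue_test.cpp below) combines with a hash of a sampled FieldValue. The shouldKeepSample implementation itself is not part of this excerpt; the following is only a plausible sketch of the idea, with the precomputed hash taken as an input because the actual hash function is an assumption here.

    #include "utils/ShardOffsetProvider.h"  // illustrative include path

    // Plausible sketch only; the real shouldKeepSample(), its hash function, and its
    // FieldValue handling may differ.
    bool shouldKeepSampleSketch(const size_t fieldValueHash, const int shardCount) {
        const uint32_t offset =
                android::os::statsd::ShardOffsetProvider::getInstance().getShardOffset();
        // Keep the sample only when (hash + per-process offset) lands in shard 0.
        return (fieldValueHash + offset) % shardCount == 0;
    }
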
diff --git a/statsd/tests/ConfigManager_test.cpp b/statsd/tests/ConfigManager_test.cpp
index 7b8f74d..f03ccf5 100644
--- a/statsd/tests/ConfigManager_test.cpp
+++ b/statsd/tests/ConfigManager_test.cpp
@@ -147,6 +147,8 @@
EXPECT_CALL(*(listener.get()), OnConfigRemoved(ConfigKeyEq(2, StringToId("xxx"))));
EXPECT_CALL(*(listener.get()), OnConfigRemoved(ConfigKeyEq(2, StringToId("yyy"))));
EXPECT_CALL(*(listener.get()), OnConfigRemoved(ConfigKeyEq(2, StringToId("zzz"))));
+ EXPECT_CALL(*(listener.get()), OnConfigRemoved(ConfigKeyEq(1, StringToId("aaa"))));
+ EXPECT_CALL(*(listener.get()), OnConfigRemoved(ConfigKeyEq(3, StringToId("bbb"))));
manager->StartupForTest();
manager->UpdateConfig(ConfigKey(1, StringToId("aaa")), config);
@@ -156,4 +158,99 @@
manager->UpdateConfig(ConfigKey(3, StringToId("bbb")), config);
manager->RemoveConfigs(2);
+ manager->RemoveConfigs(1);
+ manager->RemoveConfigs(3);
}
+
+TEST(ConfigManagerTest, TestSendRestrictedMetricsChangedBroadcast) {
+ const string configPackage = "pkg";
+ const int64_t configId = 123;
+ const int32_t callingUid = 456;
+ const vector<int64_t> metricIds = {100, 200, 999};
+
+ shared_ptr<MockPendingIntentRef> pir = SharedRefBase::make<StrictMock<MockPendingIntentRef>>();
+ EXPECT_CALL(*pir, sendRestrictedMetricsChangedBroadcast(metricIds))
+ .Times(1)
+ .WillRepeatedly(Invoke([](const vector<int64_t>&) { return Status::ok(); }));
+
+ sp<ConfigManager> manager = new ConfigManager();
+ manager->SetRestrictedMetricsChangedReceiver(configPackage, configId, callingUid, pir);
+
+ manager->SendRestrictedMetricsBroadcast({configPackage}, configId, {callingUid}, metricIds);
+}
+
+TEST(ConfigManagerTest, TestSendRestrictedMetricsChangedBroadcastMultipleConfigs) {
+ const string package1 = "pkg1", package2 = "pkg2", package3 = "pkg3";
+ const int64_t configId = 123;
+ const int32_t callingUid1 = 456, callingUid2 = 789, callingUid3 = 1111;
+ const vector<int64_t> metricIds1 = {100, 200, 999}, metricIds2 = {101}, metricIds3 = {202, 101};
+
+ shared_ptr<MockPendingIntentRef> pir1 = SharedRefBase::make<StrictMock<MockPendingIntentRef>>();
+ EXPECT_CALL(*pir1, sendRestrictedMetricsChangedBroadcast(metricIds1))
+ .Times(3)
+ .WillRepeatedly(Invoke([](const vector<int64_t>&) { return Status::ok(); }));
+ EXPECT_CALL(*pir1, sendRestrictedMetricsChangedBroadcast(metricIds2))
+ .Times(1)
+ .WillRepeatedly(Invoke([](const vector<int64_t>&) { return Status::ok(); }));
+
+ shared_ptr<MockPendingIntentRef> pir2 = SharedRefBase::make<StrictMock<MockPendingIntentRef>>();
+ EXPECT_CALL(*pir2, sendRestrictedMetricsChangedBroadcast(metricIds1))
+ .Times(1)
+ .WillRepeatedly(Invoke([](const vector<int64_t>&) { return Status::ok(); }));
+
+ shared_ptr<MockPendingIntentRef> pir3 = SharedRefBase::make<StrictMock<MockPendingIntentRef>>();
+ EXPECT_CALL(*pir3, sendRestrictedMetricsChangedBroadcast(metricIds1))
+ .Times(1)
+ .WillRepeatedly(Invoke([](const vector<int64_t>&) { return Status::ok(); }));
+ EXPECT_CALL(*pir3, sendRestrictedMetricsChangedBroadcast(metricIds2))
+ .Times(1)
+ .WillRepeatedly(Invoke([](const vector<int64_t>&) { return Status::ok(); }));
+
+ shared_ptr<MockPendingIntentRef> pir4 = SharedRefBase::make<StrictMock<MockPendingIntentRef>>();
+ EXPECT_CALL(*pir4, sendRestrictedMetricsChangedBroadcast(metricIds1))
+ .Times(2)
+ .WillRepeatedly(Invoke([](const vector<int64_t>&) { return Status::ok(); }));
+
+ shared_ptr<MockPendingIntentRef> pir5 = SharedRefBase::make<StrictMock<MockPendingIntentRef>>();
+ EXPECT_CALL(*pir5, sendRestrictedMetricsChangedBroadcast(metricIds3))
+ .Times(1)
+ .WillRepeatedly(Invoke([](const vector<int64_t>&) { return Status::ok(); }));
+
+ sp<ConfigManager> manager = new ConfigManager();
+ manager->SetRestrictedMetricsChangedReceiver(package1, configId, callingUid1, pir1);
+ manager->SetRestrictedMetricsChangedReceiver(package2, configId, callingUid2, pir2);
+ manager->SetRestrictedMetricsChangedReceiver(package1, configId, callingUid2, pir3);
+ manager->SetRestrictedMetricsChangedReceiver(package2, configId, callingUid1, pir4);
+ manager->SetRestrictedMetricsChangedReceiver(package3, configId, callingUid3, pir5);
+
+ // Invoke pir 1, 2, 3, 4.
+ manager->SendRestrictedMetricsBroadcast({package1, package2}, configId,
+ {callingUid1, callingUid2}, metricIds1);
+ // Invoke pir 1 only
+ manager->SendRestrictedMetricsBroadcast({package1}, configId, {callingUid1}, metricIds1);
+ // Invoke pir 1, 4.
+ manager->SendRestrictedMetricsBroadcast({package1, package2}, configId, {callingUid1},
+ metricIds1);
+ // Invoke pir 1, 3
+ manager->SendRestrictedMetricsBroadcast({package1}, configId, {callingUid1, callingUid2},
+ metricIds2);
+ // Invoke pir 5
+ manager->SendRestrictedMetricsBroadcast({package3}, configId, {callingUid1, callingUid3},
+ metricIds3);
+}
+
+TEST(ConfigManagerTest, TestRemoveRestrictedMetricsChangedBroadcast) {
+ const string configPackage = "pkg";
+ const int64_t configId = 123;
+ const int32_t callingUid = 456;
+ const vector<int64_t> metricIds = {100, 200, 999};
+
+ shared_ptr<MockPendingIntentRef> pir = SharedRefBase::make<StrictMock<MockPendingIntentRef>>();
+ EXPECT_CALL(*pir, sendRestrictedMetricsChangedBroadcast(metricIds)).Times(0);
+
+ sp<ConfigManager> manager = new ConfigManager();
+ manager->SetRestrictedMetricsChangedReceiver(configPackage, configId, callingUid, pir);
+ manager->RemoveRestrictedMetricsChangedReceiver(configPackage, configId, callingUid);
+
+ manager->SendRestrictedMetricsBroadcast({configPackage}, configId, {callingUid}, metricIds);
+}
\ No newline at end of file
diff --git a/statsd/tests/FieldValue_test.cpp b/statsd/tests/FieldValue_test.cpp
index ff3e54a..64be559 100644
--- a/statsd/tests/FieldValue_test.cpp
+++ b/statsd/tests/FieldValue_test.cpp
@@ -132,7 +132,8 @@
std::vector<string> attributionTags = {"location1", "location2", "location3"};
LogEvent event(/*uid=*/0, /*pid=*/0);
- makeLogEvent(&event, 10 /*atomId*/, 1012345, attributionUids, attributionTags, "some value");
+ makeLogEvent(&event, 10 /*atomId*/, /*timestamp=*/1012345, attributionUids, attributionTags,
+ "some value");
HashableDimensionKey output;
filterValues(matchers, event.getValues(), &output);
@@ -227,6 +228,142 @@
EXPECT_EQ((int32_t)13, output.getValues()[2].mValue.int_value);
}
+TEST(AtomMatcherTest, TestFilterWithOneMatcher) {
+ FieldMatcher matcher;
+ matcher.set_field(10);
+ FieldMatcher* child = matcher.add_child();
+ child->set_field(2);
+
+ vector<Matcher> matchers;
+ translateFieldMatcher(matcher, &matchers);
+
+ std::vector<int> attributionUids = {1111, 2222, 3333};
+ std::vector<string> attributionTags = {"location1", "location2", "location3"};
+
+ LogEvent event(/*uid=*/0, /*pid=*/0);
+ makeLogEvent(&event, 10 /*atomId*/, /*timestamp=*/1012345, attributionUids, attributionTags,
+ "some value");
+ FieldValue value;
+
+ EXPECT_TRUE(filterValues(matchers[0], event.getValues(), &value));
+ EXPECT_EQ((int32_t)0x20000, value.mField.getField());
+ EXPECT_EQ("some value", value.mValue.str_value);
+}
+
+TEST(AtomMatcherTest, TestFilterWithOneMatcher_PositionFIRST) {
+ FieldMatcher matcher;
+ matcher.set_field(10);
+ FieldMatcher* child = matcher.add_child();
+ child->set_field(1);
+ child->set_position(Position::FIRST);
+ child->add_child()->set_field(1);
+
+ vector<Matcher> matchers;
+ translateFieldMatcher(matcher, &matchers);
+
+ std::vector<int> attributionUids = {1111, 2222, 3333};
+ std::vector<string> attributionTags = {"location1", "location2", "location3"};
+
+ LogEvent event(/*uid=*/0, /*pid=*/0);
+ makeLogEvent(&event, 10 /*atomId*/, /*timestamp=*/1012345, attributionUids, attributionTags,
+ "some value");
+ FieldValue value;
+
+ // Should only match the first field.
+ EXPECT_TRUE(filterValues(matchers[0], event.getValues(), &value));
+ EXPECT_EQ((int32_t)0x02010101, value.mField.getField());
+ EXPECT_EQ((int32_t)1111, value.mValue.int_value);
+}
+
+TEST(AtomMatcherTest, TestFilterWithOneMatcher_PositionLAST) {
+ FieldMatcher matcher;
+ matcher.set_field(10);
+ FieldMatcher* child = matcher.add_child();
+ child->set_field(1);
+ child->set_position(Position::LAST);
+ child->add_child()->set_field(1);
+
+ vector<Matcher> matchers;
+ translateFieldMatcher(matcher, &matchers);
+
+ std::vector<int> attributionUids = {1111, 2222, 3333};
+ std::vector<string> attributionTags = {"location1", "location2", "location3"};
+
+ LogEvent event(/*uid=*/0, /*pid=*/0);
+ makeLogEvent(&event, 10 /*atomId*/, /*timestamp=*/1012345, attributionUids, attributionTags,
+ "some value");
+ FieldValue value;
+
+ // Should only match the last field.
+ EXPECT_TRUE(filterValues(matchers[0], event.getValues(), &value));
+ EXPECT_EQ((int32_t)0x02018301, value.mField.getField());
+ EXPECT_EQ((int32_t)3333, value.mValue.int_value);
+}
+
+TEST(AtomMatcherTest, TestFilterWithOneMatcher_PositionALL) {
+ FieldMatcher matcher;
+ matcher.set_field(10);
+ FieldMatcher* child = matcher.add_child();
+ child->set_field(1);
+ child->set_position(Position::ALL);
+ child->add_child()->set_field(1);
+
+ vector<Matcher> matchers;
+ translateFieldMatcher(matcher, &matchers);
+
+ std::vector<int> attributionUids = {1111, 2222, 3333};
+ std::vector<string> attributionTags = {"location1", "location2", "location3"};
+
+ LogEvent event(/*uid=*/0, /*pid=*/0);
+ makeLogEvent(&event, 10 /*atomId*/, 1012345, attributionUids, attributionTags, "some value");
+ FieldValue value;
+
+ // Can't filter with position ALL matcher.
+ EXPECT_FALSE(filterValues(matchers[0], event.getValues(), &value));
+}
+
+TEST(AtomMatcherTest, TestFilterWithOneMatcher_DifferentField) {
+ FieldMatcher matcher;
+ matcher.set_field(10);
+ FieldMatcher* child = matcher.add_child();
+ child->set_field(3);
+
+ vector<Matcher> matchers;
+ translateFieldMatcher(matcher, &matchers);
+
+ std::vector<int> attributionUids = {1111, 2222, 3333};
+ std::vector<string> attributionTags = {"location1", "location2", "location3"};
+
+ LogEvent event(/*uid=*/0, /*pid=*/0);
+ makeLogEvent(&event, 10 /*atomId*/, /*timestamp=*/1012345, attributionUids, attributionTags,
+ "some value");
+ FieldValue value;
+
+ // Shouldn't match any fields because matcher is looking for field 3.
+ EXPECT_FALSE(filterValues(matchers[0], event.getValues(), &value));
+}
+
+TEST(AtomMatcherTest, TestFilterWithOneMatcher_EmptyAttributionUids) {
+ FieldMatcher matcher;
+ matcher.set_field(10);
+ FieldMatcher* child = matcher.add_child();
+ child->set_field(1);
+ child->set_position(Position::ALL);
+ child->add_child()->set_field(1);
+
+ vector<Matcher> matchers;
+ translateFieldMatcher(matcher, &matchers);
+
+ std::vector<string> attributionTags = {"location1", "location2", "location3"};
+
+ LogEvent event(/*uid=*/0, /*pid=*/0);
+ makeLogEvent(&event, 10 /*atomId*/, /*timestamp=*/1012345, {}, attributionTags, "some value");
+ FieldValue value;
+
+ // Shouldn't match any fields because field 1 is empty.
+ EXPECT_FALSE(filterValues(matchers[0], event.getValues(), &value));
+}
+
TEST(AtomMatcherTest, TestSubDimension) {
HashableDimensionKey dim;
@@ -792,6 +929,111 @@
EXPECT_FALSE(isPrimitiveRepeatedField(field7));
}
+TEST(FieldValueTest, TestShouldKeepSampleInt) {
+ int shardOffset = 5;
+ int shardCount = 2;
+ int pos1[] = {1, 1, 1};
+
+ Field field(1, pos1, 2);
+
+ Value value1((int32_t)1001);
+ Value value2((int32_t)1002);
+
+ FieldValue fieldValue1(field, value1);
+ FieldValue fieldValue2(field, value2);
+
+ EXPECT_TRUE(shouldKeepSample(fieldValue1, shardOffset, shardCount));
+ EXPECT_FALSE(shouldKeepSample(fieldValue2, shardOffset, shardCount));
+}
+
+TEST(FieldValueTest, TestShouldKeepSampleLong) {
+ int shardOffset = 5;
+ int shardCount = 2;
+ int pos1[] = {1, 1, 1};
+
+ Field field(1, pos1, 2);
+
+ Value value1((int64_t)1001L);
+ Value value2((int64_t)1005L);
+
+ FieldValue fieldValue1(field, value1);
+ FieldValue fieldValue2(field, value2);
+
+ EXPECT_FALSE(shouldKeepSample(fieldValue1, shardOffset, shardCount));
+ EXPECT_TRUE(shouldKeepSample(fieldValue2, shardOffset, shardCount));
+}
+
+TEST(FieldValueTest, TestShouldKeepSampleFloat) {
+ int shardOffset = 5;
+ int shardCount = 2;
+ int pos1[] = {1, 1, 1};
+
+ Field field(1, pos1, 2);
+
+ Value value1((float)10.5);
+ Value value2((float)3.9);
+
+ FieldValue fieldValue1(field, value1);
+ FieldValue fieldValue2(field, value2);
+
+ EXPECT_TRUE(shouldKeepSample(fieldValue1, shardOffset, shardCount));
+ EXPECT_FALSE(shouldKeepSample(fieldValue2, shardOffset, shardCount));
+}
+
+TEST(FieldValueTest, TestShouldKeepSampleDouble) {
+ int shardOffset = 5;
+ int shardCount = 2;
+ int pos1[] = {1, 1, 1};
+
+ Field field(1, pos1, 2);
+
+ Value value1((double)1.5);
+ Value value2((double)3.9);
+
+ FieldValue fieldValue1(field, value1);
+ FieldValue fieldValue2(field, value2);
+
+ EXPECT_TRUE(shouldKeepSample(fieldValue1, shardOffset, shardCount));
+ EXPECT_FALSE(shouldKeepSample(fieldValue2, shardOffset, shardCount));
+}
+
+TEST(FieldValueTest, TestShouldKeepSampleString) {
+ int shardOffset = 5;
+ int shardCount = 2;
+ int pos1[] = {1, 1, 1};
+
+ Field field(1, pos1, 2);
+
+ Value value1("str1");
+ Value value2("str2");
+
+ FieldValue fieldValue1(field, value1);
+ FieldValue fieldValue2(field, value2);
+
+ EXPECT_FALSE(shouldKeepSample(fieldValue1, shardOffset, shardCount));
+ EXPECT_TRUE(shouldKeepSample(fieldValue2, shardOffset, shardCount));
+}
+
+TEST(FieldValueTest, TestShouldKeepSampleByteArray) {
+ int shardOffset = 5;
+ int shardCount = 2;
+ int pos1[] = {1, 1, 1};
+
+ Field field(1, pos1, 2);
+
+ vector<uint8_t> message1 = {'\t', 'e', '\0', 's', 't'};
+ vector<uint8_t> message2 = {'\t', 'e', '\0', 's', 't', 't'};
+
+ Value value1(message1);
+ Value value2(message2);
+
+ FieldValue fieldValue1(field, value1);
+ FieldValue fieldValue2(field, value2);
+
+ EXPECT_FALSE(shouldKeepSample(fieldValue1, shardOffset, shardCount));
+ EXPECT_TRUE(shouldKeepSample(fieldValue2, shardOffset, shardCount));
+}
+
} // namespace statsd
} // namespace os
} // namespace android
diff --git a/statsd/tests/HashableDimensionKey_test.cpp b/statsd/tests/HashableDimensionKey_test.cpp
index 8fe555e..b6ddf40 100644
--- a/statsd/tests/HashableDimensionKey_test.cpp
+++ b/statsd/tests/HashableDimensionKey_test.cpp
@@ -129,6 +129,41 @@
EXPECT_TRUE(containsLinkedStateValues(whatKey, primaryKey, mMetric2StateLinks, stateAtomId));
}
+/**
+ * Test that FieldValues with STORAGE values are hashed differently.
+ */
+TEST(HashableDimensionKeyTest, TestHashDimensionStorage) {
+ int pos[] = {1, 1, 1};
+ Field field(1, pos, 1);
+ vector<uint8_t> bytesField1{10, 20, 30};
+ vector<uint8_t> bytesField2{10, 20, 30, 40};
+ FieldValue fieldValue1(field, Value(bytesField1));
+ FieldValue fieldValue2(field, Value(bytesField2));
+ HashableDimensionKey dimKey1;
+ dimKey1.addValue(fieldValue1);
+ HashableDimensionKey dimKey2;
+ dimKey2.addValue(fieldValue2);
+
+ EXPECT_NE(std::hash<HashableDimensionKey>{}(dimKey1),
+ std::hash<HashableDimensionKey>{}(dimKey2));
+}
+/**
+ * Test that FieldValues with DOUBLE values are hashed differently.
+ */
+TEST(HashableDimensionKeyTest, TestHashDimensionDouble) {
+ int pos[] = {1, 1, 1};
+ Field field(1, pos, 1);
+ FieldValue fieldValue1(field, Value((double)100.00));
+ FieldValue fieldValue2(field, Value((double)200.00));
+ HashableDimensionKey dimKey1;
+ dimKey1.addValue(fieldValue1);
+ HashableDimensionKey dimKey2;
+ dimKey2.addValue(fieldValue2);
+
+ EXPECT_NE(std::hash<HashableDimensionKey>{}(dimKey1),
+ std::hash<HashableDimensionKey>{}(dimKey2));
+}
+
} // namespace statsd
} // namespace os
} // namespace android
diff --git a/statsd/tests/LogEntryMatcher_test.cpp b/statsd/tests/LogEntryMatcher_test.cpp
index 8a4fb19..c91a443 100644
--- a/statsd/tests/LogEntryMatcher_test.cpp
+++ b/statsd/tests/LogEntryMatcher_test.cpp
@@ -15,9 +15,9 @@
#include <gtest/gtest.h>
#include <stdio.h>
-#include "annotations.h"
-#include "src/statsd_config.pb.h"
#include "matchers/matcher_util.h"
+#include "src/statsd_config.pb.h"
+#include "stats_annotations.h"
#include "stats_event.h"
#include "stats_log_util.h"
#include "stats_util.h"
@@ -120,7 +120,7 @@
AStatsEvent* statsEvent = AStatsEvent_obtain();
AStatsEvent_setAtomId(statsEvent, atomId);
AStatsEvent_writeInt32Array(statsEvent, intArray.data(), intArray.size());
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_IS_UID, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_IS_UID, true);
parseStatsEventToLogEvent(statsEvent, logEvent);
}
@@ -412,7 +412,8 @@
// Make event with is_uid annotation.
LogEvent event2(/*uid=*/0, /*pid=*/0);
- makeIntWithBoolAnnotationLogEvent(&event2, TAG_ID_2, 1111, ANNOTATION_ID_IS_UID, true);
+ makeIntWithBoolAnnotationLogEvent(&event2, TAG_ID_2, 1111, ASTATSLOG_ANNOTATION_ID_IS_UID,
+ true);
// Event has is_uid annotation, so mapping from uid to package name occurs.
simpleMatcher->set_atom_id(TAG_ID_2);
@@ -948,7 +949,7 @@
fieldValueMatcher->set_position(Position::LAST);
EXPECT_TRUE(matchesSimple(uidMap, *simpleMatcher, event));
fieldValueMatcher->set_position(Position::ANY);
- EXPECT_FALSE(matchesSimple(uidMap, *simpleMatcher, event));
+ EXPECT_TRUE(matchesSimple(uidMap, *simpleMatcher, event));
neqStringList->add_str_value("str3");
fieldValueMatcher->set_position(Position::FIRST);
@@ -956,7 +957,7 @@
fieldValueMatcher->set_position(Position::LAST);
EXPECT_FALSE(matchesSimple(uidMap, *simpleMatcher, event));
fieldValueMatcher->set_position(Position::ANY);
- EXPECT_FALSE(matchesSimple(uidMap, *simpleMatcher, event));
+ EXPECT_TRUE(matchesSimple(uidMap, *simpleMatcher, event));
neqStringList->add_str_value("str1");
fieldValueMatcher->set_position(Position::FIRST);
@@ -1204,6 +1205,303 @@
matcherResults.push_back(MatchingState::kMatched);
EXPECT_FALSE(combinationMatch(children, operation, matcherResults));
}
+
+TEST(AtomMatcherTest, TestUidFieldMatcherWithWildcardString) {
+ sp<UidMap> uidMap = new UidMap();
+ uidMap->updateMap(
+ 1, {1111, 1111, 2222, 3333, 3333} /* uid list */, {1, 1, 2, 1, 2} /* version list */,
+ {android::String16("v1"), android::String16("v1"), android::String16("v2"),
+ android::String16("v1"), android::String16("v2")},
+ {android::String16("package0"), android::String16("pkg1"), android::String16("pkg1"),
+ android::String16("package2"), android::String16("package3")} /* package name list */,
+ {android::String16(""), android::String16(""), android::String16(""),
+ android::String16(""), android::String16("")},
+ /* certificateHash */ {{}, {}, {}, {}, {}});
+
+ // Set up matcher
+ AtomMatcher matcher;
+ auto simpleMatcher = matcher.mutable_simple_atom_matcher();
+ simpleMatcher->set_atom_id(TAG_ID);
+ simpleMatcher->add_field_value_matcher()->set_field(1);
+ simpleMatcher->mutable_field_value_matcher(0)->set_eq_wildcard_string("pkg*");
+
+ // Event without is_uid annotation.
+ LogEvent event1(/*uid=*/0, /*pid=*/0);
+ makeIntLogEvent(&event1, TAG_ID, 0, 1111);
+ EXPECT_FALSE(matchesSimple(uidMap, *simpleMatcher, event1));
+
+ // Event where mapping from uid to package name occurs.
+ LogEvent event2(/*uid=*/0, /*pid=*/0);
+ makeIntWithBoolAnnotationLogEvent(&event2, TAG_ID, 1111, ASTATSLOG_ANNOTATION_ID_IS_UID, true);
+ EXPECT_TRUE(matchesSimple(uidMap, *simpleMatcher, event2));
+
+ // Event where uid maps to package names that don't fit wildcard pattern.
+ LogEvent event3(/*uid=*/0, /*pid=*/0);
+ makeIntWithBoolAnnotationLogEvent(&event3, TAG_ID, 3333, ASTATSLOG_ANNOTATION_ID_IS_UID, true);
+ EXPECT_FALSE(matchesSimple(uidMap, *simpleMatcher, event3));
+
+ // Update matcher to match one AID
+ simpleMatcher->mutable_field_value_matcher(0)->set_eq_wildcard_string(
+ "AID_SYSTEM"); // uid 1000
+
+ // Event where mapping from uid to aid doesn't fit wildcard pattern.
+ LogEvent event4(/*uid=*/0, /*pid=*/0);
+ makeIntWithBoolAnnotationLogEvent(&event4, TAG_ID, 1005, ASTATSLOG_ANNOTATION_ID_IS_UID, true);
+ EXPECT_FALSE(matchesSimple(uidMap, *simpleMatcher, event4));
+
+ // Event where mapping from uid to aid does fit wildcard pattern.
+ LogEvent event5(/*uid=*/0, /*pid=*/0);
+ makeIntWithBoolAnnotationLogEvent(&event5, TAG_ID, 1000, ASTATSLOG_ANNOTATION_ID_IS_UID, true);
+ EXPECT_TRUE(matchesSimple(uidMap, *simpleMatcher, event5));
+
+ // Update matcher to match multiple AIDs
+ simpleMatcher->mutable_field_value_matcher(0)->set_eq_wildcard_string("AID_SDCARD_*");
+
+ // Event where mapping from uid to aid doesn't fit wildcard pattern.
+ LogEvent event6(/*uid=*/0, /*pid=*/0);
+ makeIntWithBoolAnnotationLogEvent(&event6, TAG_ID, 1036, ASTATSLOG_ANNOTATION_ID_IS_UID, true);
+ EXPECT_FALSE(matchesSimple(uidMap, *simpleMatcher, event6));
+
+ // Event where mapping from uid to aid does fit wildcard pattern.
+ LogEvent event7(/*uid=*/0, /*pid=*/0);
+ makeIntWithBoolAnnotationLogEvent(&event7, TAG_ID, 1034, ASTATSLOG_ANNOTATION_ID_IS_UID, true);
+ EXPECT_TRUE(matchesSimple(uidMap, *simpleMatcher, event7));
+
+ LogEvent event8(/*uid=*/0, /*pid=*/0);
+ makeIntWithBoolAnnotationLogEvent(&event8, TAG_ID, 1035, ASTATSLOG_ANNOTATION_ID_IS_UID, true);
+ EXPECT_TRUE(matchesSimple(uidMap, *simpleMatcher, event8));
+}
+
+TEST(AtomMatcherTest, TestWildcardStringMatcher) {
+ sp<UidMap> uidMap = new UidMap();
+ // Set up the matcher
+ AtomMatcher matcher;
+ SimpleAtomMatcher* simpleMatcher = matcher.mutable_simple_atom_matcher();
+ simpleMatcher->set_atom_id(TAG_ID);
+ FieldValueMatcher* fieldValueMatcher = simpleMatcher->add_field_value_matcher();
+ fieldValueMatcher->set_field(FIELD_ID_1);
+    // Matches any string that begins with "test.string:test_" and ends with a single digit
+    // between 0 and 9 inclusive.
+    fieldValueMatcher->set_eq_wildcard_string("test.string:test_[0-9]");
+
+ LogEvent event1(/*uid=*/0, /*pid=*/0);
+ makeStringLogEvent(&event1, TAG_ID, 0, "test.string:test_0");
+ EXPECT_TRUE(matchesSimple(uidMap, *simpleMatcher, event1));
+
+ LogEvent event2(/*uid=*/0, /*pid=*/0);
+ makeStringLogEvent(&event2, TAG_ID, 0, "test.string:test_19");
+ EXPECT_FALSE(
+ matchesSimple(uidMap, *simpleMatcher, event2)); // extra character at end of string
+
+ LogEvent event3(/*uid=*/0, /*pid=*/0);
+ makeStringLogEvent(&event3, TAG_ID, 0, "extra.test.string:test_1");
+ EXPECT_FALSE(matchesSimple(uidMap, *simpleMatcher,
+ event3)); // extra characters at beginning of string
+
+ LogEvent event4(/*uid=*/0, /*pid=*/0);
+ makeStringLogEvent(&event4, TAG_ID, 0, "test.string:test_");
+ EXPECT_FALSE(matchesSimple(uidMap, *simpleMatcher,
+ event4)); // missing character from 0-9 at end of string
+
+ LogEvent event5(/*uid=*/0, /*pid=*/0);
+ makeStringLogEvent(&event5, TAG_ID, 0, "est.string:test_1");
+ EXPECT_FALSE(
+ matchesSimple(uidMap, *simpleMatcher, event5)); // missing 't' at beginning of string
+
+ LogEvent event6(/*uid=*/0, /*pid=*/0);
+ makeStringLogEvent(&event6, TAG_ID, 0, "test.string:test_1extra");
+ EXPECT_FALSE(
+ matchesSimple(uidMap, *simpleMatcher, event6)); // extra characters at end of string
+
+ // Matches any string that contains "test.string:test_" + any extra characters before or after
+ fieldValueMatcher->set_eq_wildcard_string("*test.string:test_*");
+
+ LogEvent event7(/*uid=*/0, /*pid=*/0);
+ makeStringLogEvent(&event7, TAG_ID, 0, "test.string:test_");
+ EXPECT_TRUE(matchesSimple(uidMap, *simpleMatcher, event7));
+
+ LogEvent event8(/*uid=*/0, /*pid=*/0);
+ makeStringLogEvent(&event8, TAG_ID, 0, "extra.test.string:test_");
+ EXPECT_TRUE(matchesSimple(uidMap, *simpleMatcher, event8));
+
+ LogEvent event9(/*uid=*/0, /*pid=*/0);
+ makeStringLogEvent(&event9, TAG_ID, 0, "test.string:test_extra");
+ EXPECT_TRUE(matchesSimple(uidMap, *simpleMatcher, event9));
+
+ LogEvent event10(/*uid=*/0, /*pid=*/0);
+ makeStringLogEvent(&event10, TAG_ID, 0, "est.string:test_");
+ EXPECT_FALSE(matchesSimple(uidMap, *simpleMatcher, event10));
+
+ LogEvent event11(/*uid=*/0, /*pid=*/0);
+ makeStringLogEvent(&event11, TAG_ID, 0, "test.string:test");
+ EXPECT_FALSE(matchesSimple(uidMap, *simpleMatcher, event11));
+}
+
+TEST(AtomMatcherTest, TestEqAnyWildcardStringMatcher) {
+ sp<UidMap> uidMap = new UidMap();
+
+ // Set up the matcher
+ AtomMatcher matcher;
+ SimpleAtomMatcher* simpleMatcher = matcher.mutable_simple_atom_matcher();
+ simpleMatcher->set_atom_id(TAG_ID);
+
+ FieldValueMatcher* fieldValueMatcher = simpleMatcher->add_field_value_matcher();
+ fieldValueMatcher->set_field(FIELD_ID_1);
+ StringListMatcher* eqWildcardStrList = fieldValueMatcher->mutable_eq_any_wildcard_string();
+ eqWildcardStrList->add_str_value("first_string_*");
+ eqWildcardStrList->add_str_value("second_string_*");
+
+ // First wildcard pattern matched.
+ LogEvent event1(/*uid=*/0, /*pid=*/0);
+ makeStringLogEvent(&event1, TAG_ID, 0, "first_string_1");
+ EXPECT_TRUE(matchesSimple(uidMap, *simpleMatcher, event1));
+
+ // Second wildcard pattern matched.
+ LogEvent event2(/*uid=*/0, /*pid=*/0);
+ makeStringLogEvent(&event2, TAG_ID, 0, "second_string_1");
+ EXPECT_TRUE(matchesSimple(uidMap, *simpleMatcher, event2));
+
+ // No wildcard patterns matched.
+ LogEvent event3(/*uid=*/0, /*pid=*/0);
+ makeStringLogEvent(&event3, TAG_ID, 0, "third_string_1");
+ EXPECT_FALSE(matchesSimple(uidMap, *simpleMatcher, event3));
+}
+
+TEST(AtomMatcherTest, TestNeqAnyWildcardStringMatcher) {
+ sp<UidMap> uidMap = new UidMap();
+
+ // Set up the log event.
+ std::vector<int> attributionUids = {1111, 2222, 3333};
+ std::vector<string> attributionTags = {"location_1", "location_2", "location"};
+ LogEvent event(/*uid=*/0, /*pid=*/0);
+ makeAttributionLogEvent(&event, TAG_ID, 0, attributionUids, attributionTags, "some value");
+
+ // Set up the matcher. Match first tag.
+ AtomMatcher matcher;
+ SimpleAtomMatcher* simpleMatcher = matcher.mutable_simple_atom_matcher();
+ simpleMatcher->set_atom_id(TAG_ID);
+ FieldValueMatcher* attributionMatcher = simpleMatcher->add_field_value_matcher();
+ attributionMatcher->set_field(FIELD_ID_1);
+ attributionMatcher->set_position(Position::FIRST);
+ FieldValueMatcher* attributionTagMatcher =
+ attributionMatcher->mutable_matches_tuple()->add_field_value_matcher();
+ attributionTagMatcher->set_field(ATTRIBUTION_TAG_FIELD_ID);
+ StringListMatcher* neqWildcardStrList =
+ attributionTagMatcher->mutable_neq_any_wildcard_string();
+
+ // First tag is not matched. neq string list {"tag"}
+ neqWildcardStrList->add_str_value("tag");
+ EXPECT_TRUE(matchesSimple(uidMap, *simpleMatcher, event));
+
+ // First tag is matched. neq string list {"tag", "location_*"}
+ neqWildcardStrList->add_str_value("location_*");
+ EXPECT_FALSE(matchesSimple(uidMap, *simpleMatcher, event));
+
+ // Match last tag.
+ attributionMatcher->set_position(Position::LAST);
+
+ // Last tag is not matched. neq string list {"tag", "location_*"}
+ EXPECT_TRUE(matchesSimple(uidMap, *simpleMatcher, event));
+
+ // Last tag is matched. neq string list {"tag", "location_*", "location*"}
+ neqWildcardStrList->add_str_value("location*");
+ EXPECT_FALSE(matchesSimple(uidMap, *simpleMatcher, event));
+
+ // Match any tag.
+ attributionMatcher->set_position(Position::ANY);
+
+ // All tags are matched. neq string list {"tag", "location_*", "location*"}
+ EXPECT_FALSE(matchesSimple(uidMap, *simpleMatcher, event));
+
+ // Set up another log event.
+ std::vector<string> attributionTags2 = {"location_1", "location", "string"};
+ LogEvent event2(/*uid=*/0, /*pid=*/0);
+ makeAttributionLogEvent(&event2, TAG_ID, 0, attributionUids, attributionTags2, "some value");
+
+ // Tag "string" is not matched. neq string list {"tag", "location_*", "location*"}
+ EXPECT_TRUE(matchesSimple(uidMap, *simpleMatcher, event2));
+}
+
+TEST(AtomMatcherTest, TestEqAnyIntMatcher) {
+ sp<UidMap> uidMap = new UidMap();
+
+ // Set up the matcher
+ AtomMatcher matcher;
+ SimpleAtomMatcher* simpleMatcher = matcher.mutable_simple_atom_matcher();
+ simpleMatcher->set_atom_id(TAG_ID);
+
+ FieldValueMatcher* fieldValueMatcher = simpleMatcher->add_field_value_matcher();
+ fieldValueMatcher->set_field(FIELD_ID_1);
+ IntListMatcher* eqIntList = fieldValueMatcher->mutable_eq_any_int();
+ eqIntList->add_int_value(3);
+ eqIntList->add_int_value(5);
+
+ // First int matched.
+ LogEvent event1(/*uid=*/0, /*pid=*/0);
+ makeIntLogEvent(&event1, TAG_ID, 0, 3);
+ EXPECT_TRUE(matchesSimple(uidMap, *simpleMatcher, event1));
+
+ // Second int matched.
+ LogEvent event2(/*uid=*/0, /*pid=*/0);
+ makeIntLogEvent(&event2, TAG_ID, 0, 5);
+ EXPECT_TRUE(matchesSimple(uidMap, *simpleMatcher, event2));
+
+ // No ints matched.
+ LogEvent event3(/*uid=*/0, /*pid=*/0);
+ makeIntLogEvent(&event3, TAG_ID, 0, 4);
+ EXPECT_FALSE(matchesSimple(uidMap, *simpleMatcher, event3));
+}
+
+TEST(AtomMatcherTest, TestNeqAnyIntMatcher) {
+ sp<UidMap> uidMap = new UidMap();
+
+ // Set up the log event.
+ std::vector<int> attributionUids = {1111, 2222, 3333};
+ std::vector<string> attributionTags = {"location1", "location2", "location3"};
+ LogEvent event(/*uid=*/0, /*pid=*/0);
+ makeAttributionLogEvent(&event, TAG_ID, 0, attributionUids, attributionTags, "some value");
+
+ // Set up the matcher. Match first uid.
+ AtomMatcher matcher;
+ SimpleAtomMatcher* simpleMatcher = matcher.mutable_simple_atom_matcher();
+ simpleMatcher->set_atom_id(TAG_ID);
+ FieldValueMatcher* attributionMatcher = simpleMatcher->add_field_value_matcher();
+ attributionMatcher->set_field(FIELD_ID_1);
+ attributionMatcher->set_position(Position::FIRST);
+ FieldValueMatcher* attributionUidMatcher =
+ attributionMatcher->mutable_matches_tuple()->add_field_value_matcher();
+ attributionUidMatcher->set_field(ATTRIBUTION_UID_FIELD_ID);
+ IntListMatcher* neqIntList = attributionUidMatcher->mutable_neq_any_int();
+
+ // First uid is not matched. neq int list {4444}
+ neqIntList->add_int_value(4444);
+ EXPECT_TRUE(matchesSimple(uidMap, *simpleMatcher, event));
+
+ // First uid is matched. neq int list {4444, 1111}
+ neqIntList->add_int_value(1111);
+ EXPECT_FALSE(matchesSimple(uidMap, *simpleMatcher, event));
+
+ // Match last uid.
+ attributionMatcher->set_position(Position::LAST);
+
+ // Last uid is not matched. neq int list {4444, 1111}
+ EXPECT_TRUE(matchesSimple(uidMap, *simpleMatcher, event));
+
+ // Last uid is matched. neq int list {4444, 1111, 3333}
+ neqIntList->add_int_value(3333);
+ EXPECT_FALSE(matchesSimple(uidMap, *simpleMatcher, event));
+
+ // Match any uid.
+ attributionMatcher->set_position(Position::ANY);
+
+ // Uid 2222 is not matched. neq int list {4444, 1111, 3333}
+ EXPECT_TRUE(matchesSimple(uidMap, *simpleMatcher, event));
+
+ // All uids are matched. neq int list {4444, 1111, 3333, 2222}
+ neqIntList->add_int_value(2222);
+ EXPECT_FALSE(matchesSimple(uidMap, *simpleMatcher, event));
+}
+
#else
GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif
diff --git a/statsd/tests/LogEventFilter_test.cpp b/statsd/tests/LogEventFilter_test.cpp
new file mode 100644
index 0000000..037f02d
--- /dev/null
+++ b/statsd/tests/LogEventFilter_test.cpp
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2023, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "socket/LogEventFilter.h"
+
+#include <gtest/gtest.h>
+
+#include <algorithm>
+
+#ifdef __ANDROID__
+
+namespace android {
+namespace os {
+namespace statsd {
+
+namespace {
+
+constexpr int kAtomIdsCount = 100; // Filter size setup
+
+LogEventFilter::AtomIdSet generateAtomIds(int rangeStart, int rangeEndInclusive) {
+ LogEventFilter::AtomIdSet atomIds;
+ for (int i = rangeStart; i <= rangeEndInclusive; ++i) {
+ atomIds.insert(i);
+ }
+ return atomIds;
+}
+
+bool testGuaranteedUnusedAtomsNotInUse(const LogEventFilter& filter) {
+ const auto sampleIds = generateAtomIds(10000, 11000);
+ bool atLeastOneInUse = false;
+ for (const auto& atomId : sampleIds) {
+ atLeastOneInUse |= filter.isAtomInUse(atomId);
+ }
+ return !atLeastOneInUse;
+}
+
+} // namespace
+
+TEST(LogEventFilterTest, TestEmptyFilter) {
+ LogEventFilter filter;
+ const auto sampleIds = generateAtomIds(1, kAtomIdsCount);
+ for (const auto& atomId : sampleIds) {
+ EXPECT_FALSE(filter.isAtomInUse(atomId));
+ }
+}
+
+TEST(LogEventFilterTest, TestRemoveNonExistingEmptyFilter) {
+ LogEventFilter filter;
+ EXPECT_FALSE(filter.isAtomInUse(1));
+ LogEventFilter::AtomIdSet emptyAtomIdsSet;
+ EXPECT_EQ(0, filter.mTagIdsPerConsumer.size());
+ EXPECT_EQ(0, filter.mLocalTagIds.size());
+ filter.setAtomIds(std::move(emptyAtomIdsSet), reinterpret_cast<LogEventFilter::ConsumerId>(0));
+ EXPECT_FALSE(filter.isAtomInUse(1));
+ EXPECT_EQ(0, filter.mLocalTagIds.size());
+ EXPECT_EQ(0, filter.mTagIdsPerConsumer.size());
+}
+
+TEST(LogEventFilterTest, TestEmptyFilterDisabled) {
+ LogEventFilter filter;
+ filter.setFilteringEnabled(false);
+ const auto sampleIds = generateAtomIds(1, kAtomIdsCount);
+ for (const auto& atomId : sampleIds) {
+ EXPECT_TRUE(filter.isAtomInUse(atomId));
+ }
+}
+
+TEST(LogEventFilterTest, TestNonEmptyFilterFullOverlap) {
+ LogEventFilter filter;
+ auto filterIds = generateAtomIds(1, kAtomIdsCount);
+ filter.setAtomIds(std::move(filterIds), reinterpret_cast<LogEventFilter::ConsumerId>(0));
+ EXPECT_EQ(1, filter.mTagIdsPerConsumer.size());
+
+ // inner copy updated only during fetch if required
+ EXPECT_EQ(0, filter.mLocalTagIds.size());
+ const auto sampleIds = generateAtomIds(1, kAtomIdsCount);
+ for (const auto& atomId : sampleIds) {
+ EXPECT_TRUE(filter.isAtomInUse(atomId));
+ }
+ EXPECT_EQ(kAtomIdsCount, filter.mLocalTagIds.size());
+}
+
+TEST(LogEventFilterTest, TestNonEmptyFilterPartialOverlap) {
+ LogEventFilter filter;
+ auto filterIds = generateAtomIds(1, kAtomIdsCount);
+ filter.setAtomIds(std::move(filterIds), reinterpret_cast<LogEventFilter::ConsumerId>(0));
+ // extra 100 atom ids should be filtered out
+ const auto sampleIds = generateAtomIds(1, kAtomIdsCount + 100);
+ for (const auto& atomId : sampleIds) {
+ bool const atomInUse = atomId <= kAtomIdsCount;
+ EXPECT_EQ(atomInUse, filter.isAtomInUse(atomId));
+ }
+}
+
+TEST(LogEventFilterTest, TestNonEmptyFilterDisabledPartialOverlap) {
+ LogEventFilter filter;
+ auto filterIds = generateAtomIds(1, kAtomIdsCount);
+ filter.setAtomIds(std::move(filterIds), reinterpret_cast<LogEventFilter::ConsumerId>(0));
+ filter.setFilteringEnabled(false);
+    // the extra 100 atom ids should be in use because the filter is disabled
+ const auto sampleIds = generateAtomIds(1, kAtomIdsCount + 100);
+ for (const auto& atomId : sampleIds) {
+ EXPECT_TRUE(filter.isAtomInUse(atomId));
+ }
+}
+
+TEST(LogEventFilterTest, TestMultipleConsumerOverlapIdsRemoved) {
+ LogEventFilter filter;
+ auto filterIds1 = generateAtomIds(1, kAtomIdsCount);
+ // half of filterIds1 atom ids overlaps with filterIds2
+ auto filterIds2 = generateAtomIds(kAtomIdsCount / 2, kAtomIdsCount * 2);
+ filter.setAtomIds(std::move(filterIds1), reinterpret_cast<LogEventFilter::ConsumerId>(0));
+ filter.setAtomIds(std::move(filterIds2), reinterpret_cast<LogEventFilter::ConsumerId>(1));
+ // inner copy updated only during fetch if required
+ EXPECT_EQ(0, filter.mLocalTagIds.size());
+ const auto sampleIds = generateAtomIds(1, kAtomIdsCount * 2);
+ for (const auto& atomId : sampleIds) {
+ EXPECT_TRUE(filter.isAtomInUse(atomId));
+ }
+ EXPECT_EQ(kAtomIdsCount * 2, filter.mLocalTagIds.size());
+ EXPECT_TRUE(testGuaranteedUnusedAtomsNotInUse(filter));
+
+ // set empty filter for second consumer
+ LogEventFilter::AtomIdSet emptyAtomIdsSet;
+ filter.setAtomIds(std::move(emptyAtomIdsSet), reinterpret_cast<LogEventFilter::ConsumerId>(1));
+ EXPECT_EQ(kAtomIdsCount * 2, filter.mLocalTagIds.size());
+ for (const auto& atomId : sampleIds) {
+ bool const atomInUse = atomId <= kAtomIdsCount;
+ EXPECT_EQ(atomInUse, filter.isAtomInUse(atomId));
+ }
+ EXPECT_EQ(kAtomIdsCount, filter.mLocalTagIds.size());
+ EXPECT_TRUE(testGuaranteedUnusedAtomsNotInUse(filter));
+}
+
+TEST(LogEventFilterTest, TestMultipleConsumerEmptyFilter) {
+ LogEventFilter filter;
+ auto filterIds1 = generateAtomIds(1, kAtomIdsCount);
+ auto filterIds2 = generateAtomIds(kAtomIdsCount + 1, kAtomIdsCount * 2);
+ filter.setAtomIds(std::move(filterIds1), reinterpret_cast<LogEventFilter::ConsumerId>(0));
+ filter.setAtomIds(std::move(filterIds2), reinterpret_cast<LogEventFilter::ConsumerId>(1));
+ EXPECT_EQ(2, filter.mTagIdsPerConsumer.size());
+ // inner copy updated only during fetch if required
+ EXPECT_EQ(0, filter.mLocalTagIds.size());
+ const auto sampleIds = generateAtomIds(1, kAtomIdsCount * 2);
+ for (const auto& atomId : sampleIds) {
+ EXPECT_TRUE(filter.isAtomInUse(atomId));
+ }
+ EXPECT_EQ(kAtomIdsCount * 2, filter.mLocalTagIds.size());
+ EXPECT_TRUE(testGuaranteedUnusedAtomsNotInUse(filter));
+
+ // set empty filter for first consumer
+ LogEventFilter::AtomIdSet emptyAtomIdsSet;
+ filter.setAtomIds(emptyAtomIdsSet, reinterpret_cast<LogEventFilter::ConsumerId>(0));
+ EXPECT_EQ(1, filter.mTagIdsPerConsumer.size());
+ EXPECT_EQ(kAtomIdsCount * 2, filter.mLocalTagIds.size());
+ for (const auto& atomId : sampleIds) {
+ bool const atomInUse = atomId > kAtomIdsCount;
+ EXPECT_EQ(atomInUse, filter.isAtomInUse(atomId));
+ }
+ EXPECT_EQ(kAtomIdsCount, filter.mLocalTagIds.size());
+ EXPECT_TRUE(testGuaranteedUnusedAtomsNotInUse(filter));
+
+ // set empty filter for second consumer
+ filter.setAtomIds(emptyAtomIdsSet, reinterpret_cast<LogEventFilter::ConsumerId>(1));
+ EXPECT_EQ(0, filter.mTagIdsPerConsumer.size());
+ EXPECT_EQ(kAtomIdsCount, filter.mLocalTagIds.size());
+ for (const auto& atomId : sampleIds) {
+ EXPECT_FALSE(filter.isAtomInUse(atomId));
+ }
+ EXPECT_EQ(0, filter.mLocalTagIds.size());
+ EXPECT_TRUE(testGuaranteedUnusedAtomsNotInUse(filter));
+}
+
+} // namespace statsd
+} // namespace os
+} // namespace android
+#else
+GTEST_LOG_(INFO) << "This test does nothing.\n";
+#endif
diff --git a/statsd/tests/LogEvent_test.cpp b/statsd/tests/LogEvent_test.cpp
index d87b20b..1a9b920 100644
--- a/statsd/tests/LogEvent_test.cpp
+++ b/statsd/tests/LogEvent_test.cpp
@@ -14,12 +14,16 @@
#include "src/logd/LogEvent.h"
+#include <android-modules-utils/sdk_level.h>
#include <gtest/gtest.h>
+#include "flags/FlagProvider.h"
#include "frameworks/proto_logging/stats/atoms.pb.h"
#include "frameworks/proto_logging/stats/enums/stats/launcher/launcher.pb.h"
#include "log/log_event_list.h"
+#include "stats_annotations.h"
#include "stats_event.h"
+#include "statsd_test_util.h"
#ifdef __ANDROID__
@@ -27,10 +31,11 @@
namespace os {
namespace statsd {
+using android::modules::sdklevel::IsAtLeastU;
using std::string;
using std::vector;
-using util::ProtoOutputStream;
-using util::ProtoReader;
+using ::util::ProtoOutputStream;
+using ::util::ProtoReader;
namespace {
@@ -43,94 +48,117 @@
return f;
}
-void createStatsEvent(AStatsEvent* statsEvent, uint8_t typeId) {
- AStatsEvent_setAtomId(statsEvent, /*atomId=*/100);
-
- int int32Array[2] = {3, 6};
- uint32_t uids[] = {1001, 1002};
- const char* tags[] = {"tag1", "tag2"};
-
- switch (typeId) {
- case INT32_TYPE:
- AStatsEvent_writeInt32(statsEvent, 10);
- break;
- case INT64_TYPE:
- AStatsEvent_writeInt64(statsEvent, 1000L);
- break;
- case STRING_TYPE:
- AStatsEvent_writeString(statsEvent, "test");
- break;
- case LIST_TYPE:
- AStatsEvent_writeInt32Array(statsEvent, int32Array, 2);
- break;
- case FLOAT_TYPE:
- AStatsEvent_writeFloat(statsEvent, 1.3f);
- break;
- case BOOL_TYPE:
- AStatsEvent_writeBool(statsEvent, 1);
- break;
- case BYTE_ARRAY_TYPE:
- AStatsEvent_writeByteArray(statsEvent, (uint8_t*)"test", strlen("test"));
- break;
- case ATTRIBUTION_CHAIN_TYPE:
- AStatsEvent_writeAttributionChain(statsEvent, uids, tags, 2);
- break;
- default:
- break;
- }
-}
-
-void createFieldWithBoolAnnotationLogEvent(LogEvent* logEvent, uint8_t typeId, uint8_t annotationId,
- bool annotationValue, bool parseBufferResult) {
+bool createFieldWithBoolAnnotationLogEvent(LogEvent* logEvent, uint8_t typeId, uint8_t annotationId,
+ bool annotationValue, bool doHeaderPrefetch) {
AStatsEvent* statsEvent = AStatsEvent_obtain();
- createStatsEvent(statsEvent, typeId);
+ createStatsEvent(statsEvent, typeId, /*atomId=*/100);
AStatsEvent_addBoolAnnotation(statsEvent, annotationId, annotationValue);
AStatsEvent_build(statsEvent);
size_t size;
- uint8_t* buf = AStatsEvent_getBuffer(statsEvent, &size);
- EXPECT_EQ(parseBufferResult, logEvent->parseBuffer(buf, size));
-
+ const uint8_t* buf = AStatsEvent_getBuffer(statsEvent, &size);
+ if (doHeaderPrefetch) {
+ // Testing LogEvent header prefetch logic
+ const LogEvent::BodyBufferInfo bodyInfo = logEvent->parseHeader(buf, size);
+ logEvent->parseBody(bodyInfo);
+ } else {
+ logEvent->parseBuffer(buf, size);
+ }
AStatsEvent_release(statsEvent);
+
+ return logEvent->isValid();
}
-void createFieldWithIntAnnotationLogEvent(LogEvent* logEvent, uint8_t typeId, uint8_t annotationId,
- int annotationValue, bool parseBufferResult) {
+bool createFieldWithIntAnnotationLogEvent(LogEvent* logEvent, uint8_t typeId, uint8_t annotationId,
+ int annotationValue, bool doHeaderPrefetch) {
AStatsEvent* statsEvent = AStatsEvent_obtain();
- createStatsEvent(statsEvent, typeId);
+ createStatsEvent(statsEvent, typeId, /*atomId=*/100);
AStatsEvent_addInt32Annotation(statsEvent, annotationId, annotationValue);
AStatsEvent_build(statsEvent);
size_t size;
- uint8_t* buf = AStatsEvent_getBuffer(statsEvent, &size);
- EXPECT_EQ(parseBufferResult, logEvent->parseBuffer(buf, size));
-
+ const uint8_t* buf = AStatsEvent_getBuffer(statsEvent, &size);
+ if (doHeaderPrefetch) {
+ // Testing LogEvent header prefetch logic
+ const LogEvent::BodyBufferInfo bodyInfo = logEvent->parseHeader(buf, size);
+ logEvent->parseBody(bodyInfo);
+ } else {
+ logEvent->parseBuffer(buf, size);
+ }
AStatsEvent_release(statsEvent);
+
+ return logEvent->isValid();
+}
+
+bool createAtomLevelIntAnnotationLogEvent(LogEvent* logEvent, uint8_t typeId, uint8_t annotationId,
+ int annotationValue, bool doHeaderPrefetch) {
+ AStatsEvent* statsEvent = AStatsEvent_obtain();
+ AStatsEvent_setAtomId(statsEvent, /*atomId=*/100);
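+ // adding the annotation before any field is written attaches it at the atom level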
+ AStatsEvent_addInt32Annotation(statsEvent, annotationId, annotationValue);
+ fillStatsEventWithSampleValue(statsEvent, typeId);
+ AStatsEvent_build(statsEvent);
+
+ size_t size;
+ const uint8_t* buf = AStatsEvent_getBuffer(statsEvent, &size);
+ if (doHeaderPrefetch) {
+ // Testing LogEvent header prefetch logic
+ const LogEvent::BodyBufferInfo bodyInfo = logEvent->parseHeader(buf, size);
+ logEvent->parseBody(bodyInfo);
+ } else {
+ logEvent->parseBuffer(buf, size);
+ }
+ AStatsEvent_release(statsEvent);
+
+ return logEvent->isValid();
+}
+
+bool createAtomLevelBoolAnnotationLogEvent(LogEvent* logEvent, uint8_t typeId, uint8_t annotationId,
+ bool annotationValue, bool doHeaderPrefetch) {
+ AStatsEvent* statsEvent = AStatsEvent_obtain();
+ AStatsEvent_setAtomId(statsEvent, /*atomId=*/100);
+ AStatsEvent_addBoolAnnotation(statsEvent, annotationId, annotationValue);
+ fillStatsEventWithSampleValue(statsEvent, typeId);
+ AStatsEvent_build(statsEvent);
+
+ size_t size;
+ const uint8_t* buf = AStatsEvent_getBuffer(statsEvent, &size);
+ if (doHeaderPrefetch) {
+ // Testing LogEvent header prefetch logic
+ const LogEvent::BodyBufferInfo bodyInfo = logEvent->parseHeader(buf, size);
+ logEvent->parseBody(bodyInfo);
+ } else {
+ logEvent->parseBuffer(buf, size);
+ }
+ AStatsEvent_release(statsEvent);
+
+ return logEvent->isValid();
}
} // anonymous namespace
// Setup for parameterized tests.
-class LogEventTestBadAnnotationFieldTypes : public testing::TestWithParam<int> {
+class LogEventTestBadAnnotationFieldTypes : public testing::TestWithParam<std::tuple<int, bool>> {
public:
- static std::string ToString(testing::TestParamInfo<int> info) {
- switch (info.param) {
+ static std::string ToString(testing::TestParamInfo<std::tuple<int, bool>> info) {
+ const std::string boolName = std::get<1>(info.param) ? "_prefetchTrue" : "_prefetchFalse";
+
+ switch (std::get<0>(info.param)) {
case INT32_TYPE:
- return "Int32";
+ return "Int32" + boolName;
case INT64_TYPE:
- return "Int64";
+ return "Int64" + boolName;
case STRING_TYPE:
- return "String";
+ return "String" + boolName;
case LIST_TYPE:
- return "List";
+ return "List" + boolName;
case FLOAT_TYPE:
- return "Float";
+ return "Float" + boolName;
case BYTE_ARRAY_TYPE:
- return "ByteArray";
+ return "ByteArray" + boolName;
case ATTRIBUTION_CHAIN_TYPE:
- return "AttributionChain";
+ return "AttributionChain" + boolName;
default:
- return "Unknown";
+ return "Unknown" + boolName;
}
}
};
@@ -138,11 +166,40 @@
// TODO(b/222539899): Add BOOL_TYPE value once parseAnnotations is updated to check specific
// typeIds. BOOL_TYPE should be a bad field type for is_uid, nested, and reset state annotations.
INSTANTIATE_TEST_SUITE_P(BadAnnotationFieldTypes, LogEventTestBadAnnotationFieldTypes,
- testing::Values(INT32_TYPE, INT64_TYPE, STRING_TYPE, LIST_TYPE, FLOAT_TYPE,
- BYTE_ARRAY_TYPE, ATTRIBUTION_CHAIN_TYPE),
+ testing::Combine(testing::Values(INT32_TYPE, INT64_TYPE, STRING_TYPE,
+ LIST_TYPE, FLOAT_TYPE, BYTE_ARRAY_TYPE,
+ ATTRIBUTION_CHAIN_TYPE),
+ testing::Bool()),
LogEventTestBadAnnotationFieldTypes::ToString);
-TEST(LogEventTest, TestPrimitiveParsing) {
+class LogEventTest : public testing::TestWithParam<bool> {
+public:
+ bool ParseBuffer(LogEvent& logEvent, const uint8_t* buf, size_t size) {
+ size_t bufferOffset = 0;
+ if (GetParam()) {
+ // Testing LogEvent header prefetch logic
+ const LogEvent::BodyBufferInfo bodyInfo = logEvent.parseHeader(buf, size);
+ EXPECT_TRUE(logEvent.isParsedHeaderOnly());
+ const bool parseResult = logEvent.parseBody(bodyInfo);
+ EXPECT_EQ(parseResult, logEvent.isValid());
+ EXPECT_FALSE(logEvent.isParsedHeaderOnly());
+ } else {
+ const bool parseResult = logEvent.parseBuffer(buf, size);
+ EXPECT_EQ(parseResult, logEvent.isValid());
+ EXPECT_FALSE(logEvent.isParsedHeaderOnly());
+ }
+ return logEvent.isValid();
+ }
+
+ static std::string ToString(testing::TestParamInfo<bool> info) {
+ return info.param ? "PrefetchTrue" : "PrefetchFalse";
+ }
+};
+
+INSTANTIATE_TEST_SUITE_P(LogEventTestBufferParsing, LogEventTest, testing::Bool(),
+ LogEventTest::ToString);
+
+TEST_P(LogEventTest, TestPrimitiveParsing) {
AStatsEvent* event = AStatsEvent_obtain();
AStatsEvent_setAtomId(event, 100);
AStatsEvent_writeInt32(event, 10);
@@ -152,10 +209,10 @@
AStatsEvent_build(event);
size_t size;
- uint8_t* buf = AStatsEvent_getBuffer(event, &size);
+ const uint8_t* buf = AStatsEvent_getBuffer(event, &size);
LogEvent logEvent(/*uid=*/1000, /*pid=*/1001);
- EXPECT_TRUE(logEvent.parseBuffer(buf, size));
+ EXPECT_TRUE(ParseBuffer(logEvent, buf, size));
EXPECT_EQ(100, logEvent.GetTagId());
EXPECT_EQ(1000, logEvent.GetUid());
@@ -192,7 +249,59 @@
AStatsEvent_release(event);
}
-TEST(LogEventTest, TestStringAndByteArrayParsing) {
+TEST_P(LogEventTest, TestEventWithInvalidHeaderParsing) {
+ AStatsEvent* event = AStatsEvent_obtain();
+ AStatsEvent_setAtomId(event, 100);
+ AStatsEvent_writeInt32(event, 10);
+ AStatsEvent_writeInt64(event, 0x123456789);
+ AStatsEvent_writeFloat(event, 2.0);
+ AStatsEvent_writeBool(event, true);
+ AStatsEvent_build(event);
+
+ size_t size;
+ const uint8_t* buf = AStatsEvent_getBuffer(event, &size);
+
+ // Corrupt LogEvent header info
+ // OBJECT_TYPE | NUM_FIELDS | TIMESTAMP | ATOM_ID
+ // Corrupting first 4 bytes will be sufficient
+ uint8_t* bufMod = const_cast<uint8_t*>(buf);
+ memset(static_cast<void*>(bufMod), 4, ERROR_TYPE);
+
+ LogEvent logEvent(/*uid=*/1000, /*pid=*/1001);
+ EXPECT_FALSE(ParseBuffer(logEvent, buf, size));
+ EXPECT_FALSE(logEvent.isValid());
+ EXPECT_FALSE(logEvent.isParsedHeaderOnly());
+
+ AStatsEvent_release(event);
+}
+
+TEST(LogEventTestParsing, TestFetchHeaderOnly) {
+ AStatsEvent* event = AStatsEvent_obtain();
+ AStatsEvent_setAtomId(event, 100);
+ AStatsEvent_writeInt32(event, 10);
+ AStatsEvent_writeInt64(event, 0x123456789);
+ AStatsEvent_writeFloat(event, 2.0);
+ AStatsEvent_writeBool(event, true);
+ AStatsEvent_build(event);
+
+ size_t size;
+ const uint8_t* buf = AStatsEvent_getBuffer(event, &size);
+
+ LogEvent logEvent(/*uid=*/1000, /*pid=*/1001);
+ const LogEvent::BodyBufferInfo bodyInfo = logEvent.parseHeader(buf, size);
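+ // only the header is parsed at this point; field values are not populated yet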
+ EXPECT_TRUE(logEvent.isValid());
+ EXPECT_TRUE(logEvent.isParsedHeaderOnly());
+
+ AStatsEvent_release(event);
+
+ EXPECT_EQ(100, logEvent.GetTagId());
+ EXPECT_EQ(1000, logEvent.GetUid());
+ EXPECT_EQ(1001, logEvent.GetPid());
+ EXPECT_FALSE(logEvent.hasAttributionChain());
+ ASSERT_EQ(0, logEvent.getValues().size());
+}
+
+TEST_P(LogEventTest, TestStringAndByteArrayParsing) {
AStatsEvent* event = AStatsEvent_obtain();
AStatsEvent_setAtomId(event, 100);
string str = "test";
@@ -201,10 +310,10 @@
AStatsEvent_build(event);
size_t size;
- uint8_t* buf = AStatsEvent_getBuffer(event, &size);
+ const uint8_t* buf = AStatsEvent_getBuffer(event, &size);
LogEvent logEvent(/*uid=*/1000, /*pid=*/1001);
- EXPECT_TRUE(logEvent.parseBuffer(buf, size));
+ EXPECT_TRUE(ParseBuffer(logEvent, buf, size));
EXPECT_EQ(100, logEvent.GetTagId());
EXPECT_EQ(1000, logEvent.GetUid());
@@ -230,7 +339,7 @@
AStatsEvent_release(event);
}
-TEST(LogEventTest, TestEmptyString) {
+TEST_P(LogEventTest, TestEmptyString) {
AStatsEvent* event = AStatsEvent_obtain();
AStatsEvent_setAtomId(event, 100);
string empty = "";
@@ -238,10 +347,10 @@
AStatsEvent_build(event);
size_t size;
- uint8_t* buf = AStatsEvent_getBuffer(event, &size);
+ const uint8_t* buf = AStatsEvent_getBuffer(event, &size);
LogEvent logEvent(/*uid=*/1000, /*pid=*/1001);
- EXPECT_TRUE(logEvent.parseBuffer(buf, size));
+ EXPECT_TRUE(ParseBuffer(logEvent, buf, size));
EXPECT_EQ(100, logEvent.GetTagId());
EXPECT_EQ(1000, logEvent.GetUid());
@@ -260,7 +369,7 @@
AStatsEvent_release(event);
}
-TEST(LogEventTest, TestByteArrayWithNullCharacter) {
+TEST_P(LogEventTest, TestByteArrayWithNullCharacter) {
AStatsEvent* event = AStatsEvent_obtain();
AStatsEvent_setAtomId(event, 100);
uint8_t message[] = {'\t', 'e', '\0', 's', 't'};
@@ -268,10 +377,10 @@
AStatsEvent_build(event);
size_t size;
- uint8_t* buf = AStatsEvent_getBuffer(event, &size);
+ const uint8_t* buf = AStatsEvent_getBuffer(event, &size);
LogEvent logEvent(/*uid=*/1000, /*pid=*/1001);
- EXPECT_TRUE(logEvent.parseBuffer(buf, size));
+ EXPECT_TRUE(ParseBuffer(logEvent, buf, size));
EXPECT_EQ(100, logEvent.GetTagId());
EXPECT_EQ(1000, logEvent.GetUid());
@@ -290,7 +399,7 @@
AStatsEvent_release(event);
}
-TEST(LogEventTest, TestTooManyTopLevelElements) {
+TEST_P(LogEventTest, TestTooManyTopLevelElements) {
int32_t numElements = 128;
AStatsEvent* event = AStatsEvent_obtain();
AStatsEvent_setAtomId(event, 100);
@@ -302,14 +411,14 @@
AStatsEvent_build(event);
size_t size;
- uint8_t* buf = AStatsEvent_getBuffer(event, &size);
+ const uint8_t* buf = AStatsEvent_getBuffer(event, &size);
LogEvent logEvent(/*uid=*/1000, /*pid=*/1001);
- EXPECT_FALSE(logEvent.parseBuffer(buf, size));
+ EXPECT_FALSE(ParseBuffer(logEvent, buf, size));
AStatsEvent_release(event);
}
-TEST(LogEventTest, TestAttributionChain) {
+TEST_P(LogEventTest, TestAttributionChain) {
AStatsEvent* event = AStatsEvent_obtain();
AStatsEvent_setAtomId(event, 100);
@@ -323,10 +432,10 @@
AStatsEvent_build(event);
size_t size;
- uint8_t* buf = AStatsEvent_getBuffer(event, &size);
+ const uint8_t* buf = AStatsEvent_getBuffer(event, &size);
LogEvent logEvent(/*uid=*/1000, /*pid=*/1001);
- EXPECT_TRUE(logEvent.parseBuffer(buf, size));
+ EXPECT_TRUE(ParseBuffer(logEvent, buf, size));
EXPECT_EQ(100, logEvent.GetTagId());
EXPECT_EQ(1000, logEvent.GetUid());
@@ -369,7 +478,7 @@
AStatsEvent_release(event);
}
-TEST(LogEventTest, TestEmptyAttributionChain) {
+TEST_P(LogEventTest, TestEmptyAttributionChain) {
AStatsEvent* event = AStatsEvent_obtain();
AStatsEvent_setAtomId(event, 100);
@@ -378,15 +487,15 @@
AStatsEvent_build(event);
size_t size;
- uint8_t* buf = AStatsEvent_getBuffer(event, &size);
+ const uint8_t* buf = AStatsEvent_getBuffer(event, &size);
LogEvent logEvent(/*uid=*/1000, /*pid=*/1001);
- EXPECT_FALSE(logEvent.parseBuffer(buf, size));
+ EXPECT_FALSE(ParseBuffer(logEvent, buf, size));
AStatsEvent_release(event);
}
-TEST(LogEventTest, TestAttributionChainTooManyElements) {
+TEST_P(LogEventTest, TestAttributionChainTooManyElements) {
int32_t numNodes = 128;
uint32_t uids[numNodes];
vector<string> tags(numNodes); // storage that cTag elements point to
@@ -404,14 +513,14 @@
AStatsEvent_build(event);
size_t size;
- uint8_t* buf = AStatsEvent_getBuffer(event, &size);
+ const uint8_t* buf = AStatsEvent_getBuffer(event, &size);
LogEvent logEvent(/*uid=*/1000, /*pid=*/1001);
- EXPECT_FALSE(logEvent.parseBuffer(buf, size));
+ EXPECT_FALSE(ParseBuffer(logEvent, buf, size));
AStatsEvent_release(event);
}
-TEST(LogEventTest, TestArrayParsing) {
+TEST_P(LogEventTest, TestArrayParsing) {
size_t numElements = 2;
int32_t int32Array[2] = {3, 6};
int64_t int64Array[2] = {1000L, 1002L};
@@ -434,10 +543,10 @@
AStatsEvent_build(event);
size_t size;
- uint8_t* buf = AStatsEvent_getBuffer(event, &size);
+ const uint8_t* buf = AStatsEvent_getBuffer(event, &size);
LogEvent logEvent(/*uid=*/1000, /*pid=*/1001);
- EXPECT_TRUE(logEvent.parseBuffer(buf, size));
+ EXPECT_TRUE(ParseBuffer(logEvent, buf, size));
EXPECT_EQ(100, logEvent.GetTagId());
EXPECT_EQ(1000, logEvent.GetUid());
@@ -510,7 +619,7 @@
EXPECT_EQ("str2", stringArrayItem2.mValue.str_value);
}
-TEST(LogEventTest, TestEmptyStringArray) {
+TEST_P(LogEventTest, TestEmptyStringArray) {
const char* cStringArray[2];
string empty = "";
cStringArray[0] = empty.c_str();
@@ -522,10 +631,10 @@
AStatsEvent_build(event);
size_t size;
- uint8_t* buf = AStatsEvent_getBuffer(event, &size);
+ const uint8_t* buf = AStatsEvent_getBuffer(event, &size);
LogEvent logEvent(/*uid=*/1000, /*pid=*/1001);
- EXPECT_TRUE(logEvent.parseBuffer(buf, size));
+ EXPECT_TRUE(ParseBuffer(logEvent, buf, size));
EXPECT_EQ(100, logEvent.GetTagId());
EXPECT_EQ(1000, logEvent.GetUid());
@@ -549,7 +658,7 @@
AStatsEvent_release(event);
}
-TEST(LogEventTest, TestArrayTooManyElements) {
+TEST_P(LogEventTest, TestArrayTooManyElements) {
int32_t numElements = 128;
int32_t int32Array[numElements];
@@ -563,15 +672,15 @@
AStatsEvent_build(event);
size_t size;
- uint8_t* buf = AStatsEvent_getBuffer(event, &size);
+ const uint8_t* buf = AStatsEvent_getBuffer(event, &size);
LogEvent logEvent(/*uid=*/1000, /*pid=*/1001);
- EXPECT_FALSE(logEvent.parseBuffer(buf, size));
+ EXPECT_FALSE(ParseBuffer(logEvent, buf, size));
AStatsEvent_release(event);
}
-TEST(LogEventTest, TestEmptyArray) {
+TEST_P(LogEventTest, TestEmptyArray) {
int32_t int32Array[0] = {};
AStatsEvent* event = AStatsEvent_obtain();
@@ -580,25 +689,25 @@
AStatsEvent_build(event);
size_t size;
- uint8_t* buf = AStatsEvent_getBuffer(event, &size);
+ const uint8_t* buf = AStatsEvent_getBuffer(event, &size);
LogEvent logEvent(/*uid=*/1000, /*pid=*/1001);
- EXPECT_TRUE(logEvent.parseBuffer(buf, size));
+ EXPECT_TRUE(ParseBuffer(logEvent, buf, size));
EXPECT_EQ(100, logEvent.GetTagId());
EXPECT_EQ(1000, logEvent.GetUid());
EXPECT_EQ(1001, logEvent.GetPid());
- const vector<FieldValue>& values = logEvent.getValues();
- ASSERT_EQ(0, values.size());
+ ASSERT_EQ(logEvent.getValues().size(), 0);
AStatsEvent_release(event);
}
-TEST(LogEventTest, TestAnnotationIdIsUid) {
+TEST_P(LogEventTest, TestAnnotationIdIsUid) {
LogEvent event(/*uid=*/0, /*pid=*/0);
- createFieldWithBoolAnnotationLogEvent(&event, INT32_TYPE, ANNOTATION_ID_IS_UID, true,
- /*parseBufferResult*/ true);
+ EXPECT_TRUE(createFieldWithBoolAnnotationLogEvent(&event, INT32_TYPE,
+ ASTATSLOG_ANNOTATION_ID_IS_UID, true,
+ /*doHeaderPrefetch=*/GetParam()));
ASSERT_EQ(event.getNumUidFields(), 1);
@@ -607,7 +716,7 @@
EXPECT_TRUE(isUidField(values.at(0)));
}
-TEST(LogEventTest, TestAnnotationIdIsUid_RepeatedIntAndOtherFields) {
+TEST_P(LogEventTest, TestAnnotationIdIsUid_RepeatedIntAndOtherFields) {
size_t numElements = 2;
int32_t int32Array[2] = {3, 6};
@@ -621,14 +730,14 @@
AStatsEvent_setAtomId(statsEvent, 100);
AStatsEvent_writeInt32(statsEvent, 5);
AStatsEvent_writeInt32Array(statsEvent, int32Array, numElements);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_IS_UID, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_IS_UID, true);
AStatsEvent_writeStringArray(statsEvent, cStringArray, numElements);
AStatsEvent_build(statsEvent);
size_t size;
- uint8_t* buf = AStatsEvent_getBuffer(statsEvent, &size);
+ const uint8_t* buf = AStatsEvent_getBuffer(statsEvent, &size);
LogEvent logEvent(/*uid=*/1000, /*pid=*/1001);
- EXPECT_TRUE(logEvent.parseBuffer(buf, size));
+ EXPECT_TRUE(ParseBuffer(logEvent, buf, size));
EXPECT_EQ(2, logEvent.getNumUidFields());
const vector<FieldValue>& values = logEvent.getValues();
@@ -640,20 +749,20 @@
EXPECT_FALSE(isUidField(values.at(4)));
}
-TEST(LogEventTest, TestAnnotationIdIsUid_RepeatedIntOneEntry) {
+TEST_P(LogEventTest, TestAnnotationIdIsUid_RepeatedIntOneEntry) {
size_t numElements = 1;
int32_t int32Array[1] = {3};
AStatsEvent* statsEvent = AStatsEvent_obtain();
AStatsEvent_setAtomId(statsEvent, 100);
AStatsEvent_writeInt32Array(statsEvent, int32Array, numElements);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_IS_UID, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_IS_UID, true);
AStatsEvent_build(statsEvent);
size_t size;
- uint8_t* buf = AStatsEvent_getBuffer(statsEvent, &size);
+ const uint8_t* buf = AStatsEvent_getBuffer(statsEvent, &size);
LogEvent logEvent(/*uid=*/1000, /*pid=*/1001);
- EXPECT_TRUE(logEvent.parseBuffer(buf, size));
+ EXPECT_TRUE(ParseBuffer(logEvent, buf, size));
EXPECT_EQ(1, logEvent.getNumUidFields());
const vector<FieldValue>& values = logEvent.getValues();
@@ -661,46 +770,46 @@
EXPECT_TRUE(isUidField(values.at(0)));
}
-TEST(LogEventTest, TestAnnotationIdIsUid_EmptyIntArray) {
+TEST_P(LogEventTest, TestAnnotationIdIsUid_EmptyIntArray) {
int32_t int32Array[0] = {};
AStatsEvent* statsEvent = AStatsEvent_obtain();
AStatsEvent_setAtomId(statsEvent, 100);
AStatsEvent_writeInt32Array(statsEvent, int32Array, /*numElements*/ 0);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_IS_UID, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_IS_UID, true);
AStatsEvent_writeInt32(statsEvent, 5);
AStatsEvent_build(statsEvent);
size_t size;
- uint8_t* buf = AStatsEvent_getBuffer(statsEvent, &size);
+ const uint8_t* buf = AStatsEvent_getBuffer(statsEvent, &size);
LogEvent logEvent(/*uid=*/1000, /*pid=*/1001);
- EXPECT_TRUE(logEvent.parseBuffer(buf, size));
+ EXPECT_TRUE(ParseBuffer(logEvent, buf, size));
EXPECT_EQ(0, logEvent.getNumUidFields());
const vector<FieldValue>& values = logEvent.getValues();
EXPECT_EQ(values.size(), 1);
}
-TEST(LogEventTest, TestAnnotationIdIsUid_BadRepeatedInt64) {
+TEST_P(LogEventTest, TestAnnotationIdIsUid_BadRepeatedInt64) {
int64_t int64Array[2] = {1000L, 1002L};
AStatsEvent* statsEvent = AStatsEvent_obtain();
AStatsEvent_setAtomId(statsEvent, /*atomId=*/100);
AStatsEvent_writeInt64Array(statsEvent, int64Array, /*numElements*/ 2);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_IS_UID, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_IS_UID, true);
AStatsEvent_build(statsEvent);
size_t size;
- uint8_t* buf = AStatsEvent_getBuffer(statsEvent, &size);
+ const uint8_t* buf = AStatsEvent_getBuffer(statsEvent, &size);
LogEvent logEvent(/*uid=*/1000, /*pid=*/1001);
- EXPECT_FALSE(logEvent.parseBuffer(buf, size));
+ EXPECT_FALSE(ParseBuffer(logEvent, buf, size));
EXPECT_EQ(0, logEvent.getNumUidFields());
AStatsEvent_release(statsEvent);
}
-TEST(LogEventTest, TestAnnotationIdIsUid_BadRepeatedString) {
+TEST_P(LogEventTest, TestAnnotationIdIsUid_BadRepeatedString) {
size_t numElements = 2;
vector<string> stringArray = {"str1", "str2"};
const char* cStringArray[2];
@@ -711,14 +820,14 @@
AStatsEvent* statsEvent = AStatsEvent_obtain();
AStatsEvent_setAtomId(statsEvent, /*atomId=*/100);
AStatsEvent_writeStringArray(statsEvent, cStringArray, /*numElements*/ 2);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_IS_UID, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_IS_UID, true);
AStatsEvent_build(statsEvent);
size_t size;
- uint8_t* buf = AStatsEvent_getBuffer(statsEvent, &size);
+ const uint8_t* buf = AStatsEvent_getBuffer(statsEvent, &size);
LogEvent logEvent(/*uid=*/1000, /*pid=*/1001);
- EXPECT_FALSE(logEvent.parseBuffer(buf, size));
+ EXPECT_FALSE(ParseBuffer(logEvent, buf, size));
EXPECT_EQ(0, logEvent.getNumUidFields());
AStatsEvent_release(statsEvent);
@@ -727,22 +836,25 @@
TEST_P(LogEventTestBadAnnotationFieldTypes, TestAnnotationIdIsUid) {
LogEvent event(/*uid=*/0, /*pid=*/0);
- if (GetParam() != INT32_TYPE && GetParam() != LIST_TYPE) {
- createFieldWithBoolAnnotationLogEvent(&event, GetParam(), ANNOTATION_ID_IS_UID, true,
- /*parseBufferResult*/ false);
+ if (std::get<0>(GetParam()) != INT32_TYPE && std::get<0>(GetParam()) != LIST_TYPE) {
+ EXPECT_FALSE(createFieldWithBoolAnnotationLogEvent(
+ &event, std::get<0>(GetParam()), ASTATSLOG_ANNOTATION_ID_IS_UID, true,
+ /*doHeaderPrefetch=*/std::get<1>(GetParam())));
}
}
-TEST(LogEventTest, TestAnnotationIdIsUid_NotIntAnnotation) {
+TEST_P(LogEventTest, TestAnnotationIdIsUid_NotIntAnnotation) {
LogEvent event(/*uid=*/0, /*pid=*/0);
- createFieldWithIntAnnotationLogEvent(&event, INT32_TYPE, ANNOTATION_ID_IS_UID, 10,
- /*parseBufferResult*/ false);
+ EXPECT_FALSE(createFieldWithIntAnnotationLogEvent(&event, INT32_TYPE,
+ ASTATSLOG_ANNOTATION_ID_IS_UID, 10,
+ /*doHeaderPrefetch=*/GetParam()));
}
-TEST(LogEventTest, TestAnnotationIdStateNested) {
+TEST_P(LogEventTest, TestAnnotationIdStateNested) {
LogEvent event(/*uid=*/0, /*pid=*/0);
- createFieldWithBoolAnnotationLogEvent(&event, INT32_TYPE, ANNOTATION_ID_STATE_NESTED, true,
- /*parseBufferResult*/ true);
+ EXPECT_TRUE(createFieldWithBoolAnnotationLogEvent(&event, INT32_TYPE,
+ ASTATSLOG_ANNOTATION_ID_STATE_NESTED, true,
+ /*doHeaderPrefetch=*/GetParam()));
const vector<FieldValue>& values = event.getValues();
ASSERT_EQ(values.size(), 1);
@@ -752,22 +864,25 @@
TEST_P(LogEventTestBadAnnotationFieldTypes, TestAnnotationIdStateNested) {
LogEvent event(/*uid=*/0, /*pid=*/0);
- if (GetParam() != INT32_TYPE) {
- createFieldWithBoolAnnotationLogEvent(&event, GetParam(), ANNOTATION_ID_STATE_NESTED, true,
- /*parseBufferResult*/ false);
+ if (std::get<0>(GetParam()) != INT32_TYPE) {
+ EXPECT_FALSE(createFieldWithBoolAnnotationLogEvent(
+ &event, std::get<0>(GetParam()), ASTATSLOG_ANNOTATION_ID_STATE_NESTED, true,
+ /*doHeaderPrefetch=*/std::get<1>(GetParam())));
}
}
-TEST(LogEventTest, TestAnnotationIdStateNested_NotIntAnnotation) {
+TEST_P(LogEventTest, TestAnnotationIdStateNested_NotIntAnnotation) {
LogEvent event(/*uid=*/0, /*pid=*/0);
- createFieldWithIntAnnotationLogEvent(&event, INT32_TYPE, ANNOTATION_ID_STATE_NESTED, 10,
- /*parseBufferResult*/ false);
+ EXPECT_FALSE(createFieldWithIntAnnotationLogEvent(&event, INT32_TYPE,
+ ASTATSLOG_ANNOTATION_ID_STATE_NESTED, 10,
+ /*doHeaderPrefetch=*/GetParam()));
}
-TEST(LogEventTest, TestPrimaryFieldAnnotation) {
+TEST_P(LogEventTest, TestPrimaryFieldAnnotation) {
LogEvent event(/*uid=*/0, /*pid=*/0);
- createFieldWithBoolAnnotationLogEvent(&event, INT32_TYPE, ANNOTATION_ID_PRIMARY_FIELD, true,
- /*parseBufferResult*/ true);
+ EXPECT_TRUE(createFieldWithBoolAnnotationLogEvent(&event, INT32_TYPE,
+ ASTATSLOG_ANNOTATION_ID_PRIMARY_FIELD, true,
+ /*doHeaderPrefetch=*/GetParam()));
const vector<FieldValue>& values = event.getValues();
ASSERT_EQ(values.size(), 1);
@@ -777,22 +892,25 @@
TEST_P(LogEventTestBadAnnotationFieldTypes, TestPrimaryFieldAnnotation) {
LogEvent event(/*uid=*/0, /*pid=*/0);
- if (GetParam() == LIST_TYPE || GetParam() == ATTRIBUTION_CHAIN_TYPE) {
- createFieldWithBoolAnnotationLogEvent(&event, GetParam(), ANNOTATION_ID_PRIMARY_FIELD, true,
- /*parseBufferResult*/ false);
+ if (std::get<0>(GetParam()) == LIST_TYPE || std::get<0>(GetParam()) == ATTRIBUTION_CHAIN_TYPE) {
+ EXPECT_FALSE(createFieldWithBoolAnnotationLogEvent(
+ &event, std::get<0>(GetParam()), ASTATSLOG_ANNOTATION_ID_PRIMARY_FIELD, true,
+ /*doHeaderPrefetch=*/std::get<1>(GetParam())));
}
}
-TEST(LogEventTest, TestPrimaryFieldAnnotation_NotIntAnnotation) {
+TEST_P(LogEventTest, TestPrimaryFieldAnnotation_NotIntAnnotation) {
LogEvent event(/*uid=*/0, /*pid=*/0);
- createFieldWithIntAnnotationLogEvent(&event, INT32_TYPE, ANNOTATION_ID_PRIMARY_FIELD, 10,
- /*parseBufferResult*/ false);
+ EXPECT_FALSE(createFieldWithIntAnnotationLogEvent(&event, INT32_TYPE,
+ ASTATSLOG_ANNOTATION_ID_PRIMARY_FIELD, 10,
+ /*doHeaderPrefetch=*/GetParam()));
}
-TEST(LogEventTest, TestExclusiveStateAnnotation) {
+TEST_P(LogEventTest, TestExclusiveStateAnnotation) {
LogEvent event(/*uid=*/0, /*pid=*/0);
- createFieldWithBoolAnnotationLogEvent(&event, INT32_TYPE, ANNOTATION_ID_EXCLUSIVE_STATE, true,
- /*parseBufferResult*/ true);
+ EXPECT_TRUE(createFieldWithBoolAnnotationLogEvent(&event, INT32_TYPE,
+ ASTATSLOG_ANNOTATION_ID_EXCLUSIVE_STATE, true,
+ /*doHeaderPrefetch=*/GetParam()));
const vector<FieldValue>& values = event.getValues();
ASSERT_EQ(values.size(), 1);
@@ -802,20 +920,21 @@
TEST_P(LogEventTestBadAnnotationFieldTypes, TestExclusiveStateAnnotation) {
LogEvent event(/*uid=*/0, /*pid=*/0);
- if (GetParam() != INT32_TYPE) {
- createFieldWithBoolAnnotationLogEvent(&event, GetParam(), ANNOTATION_ID_EXCLUSIVE_STATE,
- true,
- /*parseBufferResult*/ false);
+ if (std::get<0>(GetParam()) != INT32_TYPE) {
+ EXPECT_FALSE(createFieldWithBoolAnnotationLogEvent(
+ &event, std::get<0>(GetParam()), ASTATSLOG_ANNOTATION_ID_EXCLUSIVE_STATE, true,
+ /*doHeaderPrefetch=*/std::get<1>(GetParam())));
}
}
-TEST(LogEventTest, TestExclusiveStateAnnotation_NotIntAnnotation) {
+TEST_P(LogEventTest, TestExclusiveStateAnnotation_NotIntAnnotation) {
LogEvent event(/*uid=*/0, /*pid=*/0);
- createFieldWithIntAnnotationLogEvent(&event, INT32_TYPE, ANNOTATION_ID_EXCLUSIVE_STATE, 10,
- /*parseBufferResult*/ false);
+ EXPECT_FALSE(createFieldWithIntAnnotationLogEvent(&event, INT32_TYPE,
+ ASTATSLOG_ANNOTATION_ID_EXCLUSIVE_STATE, 10,
+ /*doHeaderPrefetch=*/GetParam()));
}
-TEST(LogEventTest, TestPrimaryFieldFirstUidAnnotation) {
+TEST_P(LogEventTest, TestPrimaryFieldFirstUidAnnotation) {
// Event has 10 ints and then an attribution chain
int numInts = 10;
int firstUidInChainIndex = numInts;
@@ -831,14 +950,15 @@
AStatsEvent_writeInt32(statsEvent, 10);
}
AStatsEvent_writeAttributionChain(statsEvent, uids, tags, 2);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_PRIMARY_FIELD_FIRST_UID, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_PRIMARY_FIELD_FIRST_UID,
+ true);
AStatsEvent_build(statsEvent);
// Construct LogEvent
size_t size;
- uint8_t* buf = AStatsEvent_getBuffer(statsEvent, &size);
+ const uint8_t* buf = AStatsEvent_getBuffer(statsEvent, &size);
LogEvent logEvent(/*uid=*/0, /*pid=*/0);
- EXPECT_TRUE(logEvent.parseBuffer(buf, size));
+ EXPECT_TRUE(ParseBuffer(logEvent, buf, size));
AStatsEvent_release(statsEvent);
// Check annotation
@@ -850,50 +970,88 @@
TEST_P(LogEventTestBadAnnotationFieldTypes, TestPrimaryFieldFirstUidAnnotation) {
LogEvent event(/*uid=*/0, /*pid=*/0);
- if (GetParam() != ATTRIBUTION_CHAIN_TYPE) {
- createFieldWithBoolAnnotationLogEvent(&event, GetParam(),
- ANNOTATION_ID_PRIMARY_FIELD_FIRST_UID, true,
- /*parseBufferResult*/ false);
+ if (std::get<0>(GetParam()) != ATTRIBUTION_CHAIN_TYPE) {
+ EXPECT_FALSE(createFieldWithBoolAnnotationLogEvent(
+ &event, std::get<0>(GetParam()), ASTATSLOG_ANNOTATION_ID_PRIMARY_FIELD_FIRST_UID,
+ true,
+ /*doHeaderPrefetch=*/std::get<1>(GetParam())));
}
}
-TEST(LogEventTest, TestPrimaryFieldFirstUidAnnotation_NotIntAnnotation) {
+TEST_P(LogEventTest, TestPrimaryFieldFirstUidAnnotation_NotIntAnnotation) {
LogEvent event(/*uid=*/0, /*pid=*/0);
- createFieldWithIntAnnotationLogEvent(&event, ATTRIBUTION_CHAIN_TYPE,
- ANNOTATION_ID_PRIMARY_FIELD_FIRST_UID, 10,
- /*parseBufferResult*/ false);
+ EXPECT_FALSE(createFieldWithIntAnnotationLogEvent(
+ &event, ATTRIBUTION_CHAIN_TYPE, ASTATSLOG_ANNOTATION_ID_PRIMARY_FIELD_FIRST_UID, 10,
+ /*doHeaderPrefetch=*/GetParam()));
}
-TEST(LogEventTest, TestResetStateAnnotation) {
+TEST_P(LogEventTest, TestResetStateAnnotation) {
int32_t resetState = 10;
LogEvent event(/*uid=*/0, /*pid=*/0);
- createFieldWithIntAnnotationLogEvent(&event, INT32_TYPE, ANNOTATION_ID_TRIGGER_STATE_RESET,
- resetState, /*parseBufferResult*/ true);
+ EXPECT_TRUE(createFieldWithIntAnnotationLogEvent(
+ &event, INT32_TYPE, ASTATSLOG_ANNOTATION_ID_TRIGGER_STATE_RESET, resetState,
+ /*doHeaderPrefetch=*/GetParam()));
const vector<FieldValue>& values = event.getValues();
ASSERT_EQ(values.size(), 1);
EXPECT_EQ(event.getResetState(), resetState);
}
+TEST_P(LogEventTest, TestRestrictionCategoryAnnotation) {
+ if (!IsAtLeastU()) {
+ GTEST_SKIP();
+ }
+ int32_t restrictionCategory = ASTATSLOG_RESTRICTION_CATEGORY_DIAGNOSTIC;
+ LogEvent event(/*uid=*/0, /*pid=*/0);
+ EXPECT_TRUE(createAtomLevelIntAnnotationLogEvent(
+ &event, INT32_TYPE, ASTATSLOG_ANNOTATION_ID_RESTRICTION_CATEGORY, restrictionCategory,
+ /*doHeaderPrefetch=*/GetParam()));
+
+ ASSERT_EQ(event.getRestrictionCategory(), restrictionCategory);
+}
+
+TEST_P(LogEventTest, TestInvalidRestrictionCategoryAnnotation) {
+ if (!IsAtLeastU()) {
+ GTEST_SKIP();
+ }
+ int32_t restrictionCategory = 619; // unknown category
+ LogEvent event(/*uid=*/0, /*pid=*/0);
+ EXPECT_FALSE(createAtomLevelIntAnnotationLogEvent(
+ &event, INT32_TYPE, ASTATSLOG_ANNOTATION_ID_RESTRICTION_CATEGORY, restrictionCategory,
+ /*doHeaderPrefetch=*/GetParam()));
+}
+
+TEST_P(LogEventTest, TestRestrictionCategoryAnnotationBelowUDevice) {
+ if (IsAtLeastU()) {
+ GTEST_SKIP();
+ }
+ int32_t restrictionCategory = ASTATSLOG_RESTRICTION_CATEGORY_DIAGNOSTIC;
+ LogEvent event(/*uid=*/0, /*pid=*/0);
+ EXPECT_FALSE(createAtomLevelIntAnnotationLogEvent(
+ &event, INT32_TYPE, ASTATSLOG_ANNOTATION_ID_RESTRICTION_CATEGORY, restrictionCategory,
+ /*doHeaderPrefetch=*/GetParam()));
+}
+
TEST_P(LogEventTestBadAnnotationFieldTypes, TestResetStateAnnotation) {
LogEvent event(/*uid=*/0, /*pid=*/0);
int32_t resetState = 10;
- if (GetParam() != INT32_TYPE) {
- createFieldWithIntAnnotationLogEvent(&event, GetParam(), ANNOTATION_ID_TRIGGER_STATE_RESET,
- resetState,
- /*parseBufferResult*/ false);
+ if (std::get<0>(GetParam()) != INT32_TYPE) {
+ EXPECT_FALSE(createFieldWithIntAnnotationLogEvent(
+ &event, std::get<0>(GetParam()), ASTATSLOG_ANNOTATION_ID_TRIGGER_STATE_RESET,
+ resetState,
+ /*doHeaderPrefetch=*/std::get<1>(GetParam())));
}
}
-TEST(LogEventTest, TestResetStateAnnotation_NotBoolAnnotation) {
+TEST_P(LogEventTest, TestResetStateAnnotation_NotBoolAnnotation) {
LogEvent event(/*uid=*/0, /*pid=*/0);
- createFieldWithBoolAnnotationLogEvent(&event, INT32_TYPE, ANNOTATION_ID_TRIGGER_STATE_RESET,
- true,
- /*parseBufferResult*/ false);
+ EXPECT_FALSE(createFieldWithBoolAnnotationLogEvent(
+ &event, INT32_TYPE, ASTATSLOG_ANNOTATION_ID_TRIGGER_STATE_RESET, true,
+ /*doHeaderPrefetch=*/GetParam()));
}
-TEST(LogEventTest, TestUidAnnotationWithInt8MaxValues) {
+TEST_P(LogEventTest, TestUidAnnotationWithInt8MaxValues) {
int32_t numElements = INT8_MAX;
int32_t int32Array[numElements];
@@ -906,18 +1064,18 @@
AStatsEvent_writeInt32Array(event, int32Array, numElements);
AStatsEvent_writeInt32(event, 10);
AStatsEvent_writeInt32(event, 11);
- AStatsEvent_addBoolAnnotation(event, ANNOTATION_ID_IS_UID, true);
+ AStatsEvent_addBoolAnnotation(event, ASTATSLOG_ANNOTATION_ID_IS_UID, true);
AStatsEvent_build(event);
size_t size;
- uint8_t* buf = AStatsEvent_getBuffer(event, &size);
+ const uint8_t* buf = AStatsEvent_getBuffer(event, &size);
LogEvent logEvent(/*uid=*/1000, /*pid=*/1001);
- EXPECT_TRUE(logEvent.parseBuffer(buf, size));
+ EXPECT_TRUE(ParseBuffer(logEvent, buf, size));
AStatsEvent_release(event);
}
-TEST(LogEventTest, TestEmptyAttributionChainWithPrimaryFieldFirstUidAnnotation) {
+TEST_P(LogEventTest, TestEmptyAttributionChainWithPrimaryFieldFirstUidAnnotation) {
AStatsEvent* event = AStatsEvent_obtain();
AStatsEvent_setAtomId(event, 100);
@@ -926,19 +1084,116 @@
AStatsEvent_writeInt32(event, 10);
AStatsEvent_writeAttributionChain(event, uids, tags, 0);
- AStatsEvent_addBoolAnnotation(event, ANNOTATION_ID_PRIMARY_FIELD_FIRST_UID, true);
+ AStatsEvent_addBoolAnnotation(event, ASTATSLOG_ANNOTATION_ID_PRIMARY_FIELD_FIRST_UID, true);
AStatsEvent_build(event);
size_t size;
- uint8_t* buf = AStatsEvent_getBuffer(event, &size);
+ const uint8_t* buf = AStatsEvent_getBuffer(event, &size);
LogEvent logEvent(/*uid=*/1000, /*pid=*/1001);
- EXPECT_FALSE(logEvent.parseBuffer(buf, size));
+ EXPECT_FALSE(ParseBuffer(logEvent, buf, size));
AStatsEvent_release(event);
}
+// Setup for parameterized tests.
+class LogEvent_FieldRestrictionTest : public testing::TestWithParam<std::tuple<int, bool>> {
+public:
+ static std::string ToString(testing::TestParamInfo<std::tuple<int, bool>> info) {
+ const std::string boolName = std::get<1>(info.param) ? "_prefetchTrue" : "_prefetchFalse";
+
+ switch (std::get<0>(info.param)) {
+ case ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_PERIPHERAL_DEVICE_INFO:
+ return "PeripheralDeviceInfo" + boolName;
+ case ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_APP_USAGE:
+ return "AppUsage" + boolName;
+ case ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_APP_ACTIVITY:
+ return "AppActivity" + boolName;
+ case ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_HEALTH_CONNECT:
+ return "HealthConnect" + boolName;
+ case ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_ACCESSIBILITY:
+ return "Accessibility" + boolName;
+ case ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_SYSTEM_SEARCH:
+ return "SystemSearch" + boolName;
+ case ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_USER_ENGAGEMENT:
+ return "UserEngagement" + boolName;
+ case ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_AMBIENT_SENSING:
+ return "AmbientSensing" + boolName;
+ case ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_DEMOGRAPHIC_CLASSIFICATION:
+ return "DemographicClassification" + boolName;
+ default:
+ return "Unknown" + boolName;
+ }
+ }
+ void TearDown() override {
+ FlagProvider::getInstance().resetOverrides();
+ }
+};
+
+// TODO(b/222539899): Add BOOL_TYPE value once parseAnnotations is updated to check specific
+// typeIds. BOOL_TYPE should be a bad field type for is_uid, nested, and reset state annotations.
+INSTANTIATE_TEST_SUITE_P(
+ LogEvent_FieldRestrictionTest, LogEvent_FieldRestrictionTest,
+ testing::Combine(
+ testing::Values(
+ ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_PERIPHERAL_DEVICE_INFO,
+ ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_APP_USAGE,
+ ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_APP_ACTIVITY,
+ ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_HEALTH_CONNECT,
+ ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_ACCESSIBILITY,
+ ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_SYSTEM_SEARCH,
+ ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_USER_ENGAGEMENT,
+ ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_AMBIENT_SENSING,
+ ASTATSLOG_ANNOTATION_ID_FIELD_RESTRICTION_DEMOGRAPHIC_CLASSIFICATION),
+ testing::Bool()),
+ LogEvent_FieldRestrictionTest::ToString);
+
+TEST_P(LogEvent_FieldRestrictionTest, TestFieldRestrictionAnnotation) {
+ if (!IsAtLeastU()) {
+ GTEST_SKIP();
+ }
+ LogEvent event(/*uid=*/0, /*pid=*/0);
+ EXPECT_TRUE(
+ createFieldWithBoolAnnotationLogEvent(&event, INT32_TYPE, std::get<0>(GetParam()), true,
+ /*doHeaderPrefetch=*/std::get<1>(GetParam())));
+ // Some basic checks to make sure the event is parsed correctly.
+ EXPECT_EQ(event.GetTagId(), 100);
+ ASSERT_EQ(event.getValues().size(), 1);
+ EXPECT_EQ(event.getValues()[0].mValue.getType(), Type::INT);
+}
+
+TEST_P(LogEvent_FieldRestrictionTest, TestInvalidAnnotationIntType) {
+ if (!IsAtLeastU()) {
+ GTEST_SKIP();
+ }
+ LogEvent event(/*uid=*/0, /*pid=*/0);
+ EXPECT_FALSE(createFieldWithIntAnnotationLogEvent(
+ &event, STRING_TYPE, std::get<0>(GetParam()),
+ /*random int*/ 15, /*doHeaderPrefetch=*/std::get<1>(GetParam())));
+}
+
+TEST_P(LogEvent_FieldRestrictionTest, TestInvalidAnnotationAtomLevel) {
+ if (!IsAtLeastU()) {
+ GTEST_SKIP();
+ }
+ LogEvent event(/*uid=*/0, /*pid=*/0);
+ EXPECT_FALSE(createAtomLevelBoolAnnotationLogEvent(
+ &event, STRING_TYPE, std::get<0>(GetParam()), true,
+ /*doHeaderPrefetch=*/std::get<1>(GetParam())));
+}
+
+TEST_P(LogEvent_FieldRestrictionTest, TestRestrictionCategoryAnnotationBelowUDevice) {
+ if (IsAtLeastU()) {
+ GTEST_SKIP();
+ }
+ int32_t restrictionCategory = ASTATSLOG_RESTRICTION_CATEGORY_DIAGNOSTIC;
+ LogEvent event(/*uid=*/0, /*pid=*/0);
+ EXPECT_FALSE(
+ createFieldWithBoolAnnotationLogEvent(&event, INT32_TYPE, std::get<0>(GetParam()), true,
+ /*doHeaderPrefetch=*/std::get<1>(GetParam())));
+}
+
} // namespace statsd
} // namespace os
} // namespace android
diff --git a/statsd/tests/MetricsManager_test.cpp b/statsd/tests/MetricsManager_test.cpp
index b5e8d8f..16e9ab7 100644
--- a/statsd/tests/MetricsManager_test.cpp
+++ b/statsd/tests/MetricsManager_test.cpp
@@ -35,6 +35,8 @@
using namespace testing;
using android::sp;
+using android::modules::sdklevel::IsAtLeastS;
+using android::modules::sdklevel::IsAtLeastU;
using android::os::statsd::Predicate;
using std::map;
using std::set;
@@ -93,6 +95,22 @@
return config;
}
+StatsdConfig buildGoodRestrictedConfig() {
+ StatsdConfig config;
+ config.set_id(12345);
+ config.set_restricted_metrics_delegate_package_name("delegate");
+
+ AtomMatcher* eventMatcher = config.add_atom_matcher();
+ eventMatcher->set_id(StringToId("SCREEN_IS_ON"));
+ SimpleAtomMatcher* simpleAtomMatcher = eventMatcher->mutable_simple_atom_matcher();
+ simpleAtomMatcher->set_atom_id(2 /*SCREEN_STATE_CHANGE*/);
+
+ EventMetric* metric = config.add_event_metric();
+ metric->set_id(3);
+ metric->set_what(StringToId("SCREEN_IS_ON"));
+ return config;
+}
+
set<int32_t> unionSet(const vector<set<int32_t>> sets) {
set<int32_t> toRet;
for (const set<int32_t>& s : sets) {
@@ -264,6 +282,43 @@
UnorderedElementsAreArray(unionSet({defaultPullUids, app2Uids, {AID_ADB}})));
}
+struct MetricsManagerServerFlagParam {
+ string flagValue;
+ string label;
+};
+
+class MetricsManagerTest_SPlus
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<MetricsManagerServerFlagParam> {
+protected:
+ void SetUp() override {
+ if (shouldSkipTest()) {
+ GTEST_SKIP() << skipReason();
+ }
+ }
+
+ bool shouldSkipTest() const {
+ return !IsAtLeastS();
+ }
+
+ string skipReason() const {
+ return "Skipping MetricsManagerTest_SPlus because device is not S+";
+ }
+
+ std::optional<string> originalFlagValue;
+};
+
+INSTANTIATE_TEST_SUITE_P(
+ MetricsManagerTest_SPlus, MetricsManagerTest_SPlus,
+ testing::ValuesIn<MetricsManagerServerFlagParam>({
+ // Server flag values
+ {FLAG_TRUE, "ServerFlagTrue"},
+ {FLAG_FALSE, "ServerFlagFalse"},
+ }),
+ [](const testing::TestParamInfo<MetricsManagerTest_SPlus::ParamType>& info) {
+ return info.param.label;
+ });
+
TEST(MetricsManagerTest, TestCheckLogCredentialsWhitelistedAtom) {
sp<UidMap> uidMap;
sp<StatsPullerManager> pullerManager = new StatsPullerManager();
@@ -316,6 +371,54 @@
EXPECT_FALSE(metricsManager.isConfigValid());
}
+TEST_P(MetricsManagerTest_SPlus, TestRestrictedMetricsConfig) {
+ sp<UidMap> uidMap;
+ sp<StatsPullerManager> pullerManager = new StatsPullerManager();
+ sp<AlarmMonitor> anomalyAlarmMonitor;
+ sp<AlarmMonitor> periodicAlarmMonitor;
+
+ StatsdConfig config = buildGoodRestrictedConfig();
+ config.add_allowed_log_source("AID_SYSTEM");
+ config.set_restricted_metrics_delegate_package_name("rm");
+
+ MetricsManager metricsManager(kConfigKey, config, timeBaseSec, timeBaseSec, uidMap,
+ pullerManager, anomalyAlarmMonitor, periodicAlarmMonitor);
+
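+ // restricted metric configs are only supported on U+ devices; on older releases the config is invalid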
+ if (IsAtLeastU()) {
+ EXPECT_TRUE(metricsManager.isConfigValid());
+ } else {
+ EXPECT_EQ(metricsManager.mInvalidConfigReason,
+ INVALID_CONFIG_REASON_RESTRICTED_METRIC_NOT_ENABLED);
+ ASSERT_FALSE(metricsManager.isConfigValid());
+ }
+}
+
+TEST_P(MetricsManagerTest_SPlus, TestRestrictedMetricsConfigUpdate) {
+ sp<UidMap> uidMap;
+ sp<StatsPullerManager> pullerManager = new StatsPullerManager();
+ sp<AlarmMonitor> anomalyAlarmMonitor;
+ sp<AlarmMonitor> periodicAlarmMonitor;
+
+ StatsdConfig config = buildGoodRestrictedConfig();
+ config.add_allowed_log_source("AID_SYSTEM");
+ config.set_restricted_metrics_delegate_package_name("rm");
+
+ MetricsManager metricsManager(kConfigKey, config, timeBaseSec, timeBaseSec, uidMap,
+ pullerManager, anomalyAlarmMonitor, periodicAlarmMonitor);
+
+ StatsdConfig config2 = buildGoodRestrictedConfig();
+ metricsManager.updateConfig(config, timeBaseSec, timeBaseSec, anomalyAlarmMonitor,
+ periodicAlarmMonitor);
+
+ if (IsAtLeastU()) {
+ EXPECT_TRUE(metricsManager.isConfigValid());
+ } else {
+ EXPECT_EQ(metricsManager.mInvalidConfigReason,
+ INVALID_CONFIG_REASON_RESTRICTED_METRIC_NOT_ENABLED);
+ ASSERT_FALSE(metricsManager.isConfigValid());
+ }
+}
+
} // namespace statsd
} // namespace os
} // namespace android
diff --git a/statsd/tests/SocketListener_test.cpp b/statsd/tests/SocketListener_test.cpp
new file mode 100644
index 0000000..42c084b
--- /dev/null
+++ b/statsd/tests/SocketListener_test.cpp
@@ -0,0 +1,301 @@
+/*
+ * Copyright (C) 2023, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <gtest/gtest.h>
+
+#include "socket/StatsSocketListener.h"
+#include "tests/statsd_test_util.h"
+
+#ifdef __ANDROID__
+
+namespace android {
+namespace os {
+namespace statsd {
+
+namespace {
+
+constexpr uint32_t kTestUid = 1001;
+constexpr uint32_t kTestPid = 1002;
+constexpr int kEventCount = 1000;
+constexpr int kEventFilteredCount = 500;
+constexpr int kAtomId = 1000;
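+// each generated event uses a unique atom id: kAtomId, kAtomId + 1, ...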
+
+class AStatsEventWrapper final {
+ AStatsEvent* statsEvent = nullptr;
+
+public:
+ AStatsEventWrapper(int atomId) {
+ statsEvent = AStatsEvent_obtain();
+ createStatsEvent(statsEvent, INT64_TYPE, /*atomId=*/atomId);
+ AStatsEvent_build(statsEvent);
+ }
+
+ std::pair<const uint8_t*, size_t> getBuffer() const {
+ size_t size;
+ const uint8_t* buf = AStatsEvent_getBuffer(statsEvent, &size);
+ return std::make_pair(buf, size);
+ }
+
+ ~AStatsEventWrapper() {
+ AStatsEvent_release(statsEvent);
+ }
+};
+
+} // namespace
+
+void generateAtomLogging(const std::shared_ptr<LogEventQueue>& queue,
+ const std::shared_ptr<LogEventFilter>& filter, int eventCount,
+ int startAtomId) {
+ // create eventCount AStatsEvents with sequential atom ids and push them through the socket listener
+ for (int i = 0; i < eventCount; i++) {
+ AStatsEventWrapper event(startAtomId + i);
+ auto [buf, size] = event.getBuffer();
+ StatsSocketListener::processMessage(buf, size, kTestUid, kTestPid, queue, filter);
+ }
+}
+
+class SocketParseMessageTestNoFiltering : public testing::TestWithParam<bool> {
+protected:
+ std::shared_ptr<LogEventQueue> mEventQueue;
+ std::shared_ptr<LogEventFilter> mLogEventFilter;
+
+public:
+ SocketParseMessageTestNoFiltering()
+ : mEventQueue(std::make_shared<LogEventQueue>(kEventCount /*buffer limit*/)),
+ mLogEventFilter(GetParam() ? std::make_shared<LogEventFilter>() : nullptr) {
+ }
+
+ static std::string ToString(testing::TestParamInfo<bool> info) {
+ return info.param ? "WithEventFilter" : "NoEventFilter";
+ }
+};
+
+INSTANTIATE_TEST_SUITE_P(SocketParseMessageTestNoFiltering, SocketParseMessageTestNoFiltering,
+ testing::Bool(), SocketParseMessageTestNoFiltering::ToString);
+
+TEST_P(SocketParseMessageTestNoFiltering, TestProcessMessageNoFiltering) {
+ if (GetParam()) {
+ mLogEventFilter->setFilteringEnabled(false);
+ }
+
+ generateAtomLogging(mEventQueue, mLogEventFilter, kEventCount, kAtomId);
+
+ // check content of the queue
+ EXPECT_EQ(kEventCount, mEventQueue->mQueue.size());
+ for (int i = 0; i < kEventCount; i++) {
+ auto logEvent = mEventQueue->waitPop();
+ EXPECT_TRUE(logEvent->isValid());
+ EXPECT_EQ(kAtomId + i, logEvent->GetTagId());
+ EXPECT_FALSE(logEvent->isParsedHeaderOnly());
+ }
+}
+
+TEST_P(SocketParseMessageTestNoFiltering, TestProcessMessageNoFilteringWithEmptySetExplicitSet) {
+ if (GetParam()) {
+ mLogEventFilter->setFilteringEnabled(false);
+ LogEventFilter::AtomIdSet idsList;
+ mLogEventFilter->setAtomIds(idsList, nullptr);
+ }
+
+ generateAtomLogging(mEventQueue, mLogEventFilter, kEventCount, kAtomId);
+
+ // check content of the queue
+ EXPECT_EQ(kEventCount, mEventQueue->mQueue.size());
+ for (int i = 0; i < kEventCount; i++) {
+ auto logEvent = mEventQueue->waitPop();
+ EXPECT_TRUE(logEvent->isValid());
+ EXPECT_EQ(kAtomId + i, logEvent->GetTagId());
+ EXPECT_FALSE(logEvent->isParsedHeaderOnly());
+ }
+}
+
+TEST(SocketParseMessageTest, TestProcessMessageFilterEmptySet) {
+ std::shared_ptr<LogEventQueue> eventQueue =
+ std::make_shared<LogEventQueue>(kEventCount /*buffer limit*/);
+
+ std::shared_ptr<LogEventFilter> logEventFilter = std::make_shared<LogEventFilter>();
+
+ generateAtomLogging(eventQueue, logEventFilter, kEventCount, kAtomId);
+
+ // check content of the queue
+ for (int i = 0; i < kEventCount; i++) {
+ auto logEvent = eventQueue->waitPop();
+ EXPECT_TRUE(logEvent->isValid());
+ EXPECT_EQ(kAtomId + i, logEvent->GetTagId());
+ EXPECT_TRUE(logEvent->isParsedHeaderOnly());
+ }
+}
+
+TEST(SocketParseMessageTest, TestProcessMessageFilterEmptySetExplicitSet) {
+ std::shared_ptr<LogEventQueue> eventQueue =
+ std::make_shared<LogEventQueue>(kEventCount /*buffer limit*/);
+
+ std::shared_ptr<LogEventFilter> logEventFilter = std::make_shared<LogEventFilter>();
+
+ LogEventFilter::AtomIdSet idsList;
+ logEventFilter->setAtomIds(idsList, nullptr);
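+ // an explicitly set empty id set means no atom is in use, so every event is parsed header-only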
+
+ generateAtomLogging(eventQueue, logEventFilter, kEventCount, kAtomId);
+
+ // check content of the queue
+ for (int i = 0; i < kEventCount; i++) {
+ auto logEvent = eventQueue->waitPop();
+ EXPECT_TRUE(logEvent->isValid());
+ EXPECT_EQ(kAtomId + i, logEvent->GetTagId());
+ EXPECT_TRUE(logEvent->isParsedHeaderOnly());
+ }
+}
+
+TEST(SocketParseMessageTest, TestProcessMessageFilterCompleteSet) {
+ std::shared_ptr<LogEventQueue> eventQueue =
+ std::make_shared<LogEventQueue>(kEventCount /*buffer limit*/);
+
+ std::shared_ptr<LogEventFilter> logEventFilter = std::make_shared<LogEventFilter>();
+
+ LogEventFilter::AtomIdSet idsList;
+ for (int i = 0; i < kEventCount; i++) {
+ idsList.insert(kAtomId + i);
+ }
+ logEventFilter->setAtomIds(idsList, nullptr);
+
+ generateAtomLogging(eventQueue, logEventFilter, kEventCount, kAtomId);
+
+ // check content of the queue
+ EXPECT_EQ(kEventCount, eventQueue->mQueue.size());
+ for (int i = 0; i < kEventCount; i++) {
+ auto logEvent = eventQueue->waitPop();
+ EXPECT_TRUE(logEvent->isValid());
+ EXPECT_EQ(kAtomId + i, logEvent->GetTagId());
+ EXPECT_FALSE(logEvent->isParsedHeaderOnly());
+ }
+}
+
+TEST(SocketParseMessageTest, TestProcessMessageFilterPartialSet) {
+ std::shared_ptr<LogEventQueue> eventQueue =
+ std::make_shared<LogEventQueue>(kEventCount /*buffer limit*/);
+
+ std::shared_ptr<LogEventFilter> logEventFilter = std::make_shared<LogEventFilter>();
+
+ LogEventFilter::AtomIdSet idsList;
+ for (int i = 0; i < kEventFilteredCount; i++) {
+ idsList.insert(kAtomId + i);
+ }
+ logEventFilter->setAtomIds(idsList, nullptr);
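+ // only the first kEventFilteredCount atom ids are in the filter; the rest should be parsed header-only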
+
+ generateAtomLogging(eventQueue, logEventFilter, kEventCount, kAtomId);
+
+ // check content of the queue
+ EXPECT_EQ(kEventCount, eventQueue->mQueue.size());
+ for (int i = 0; i < kEventFilteredCount; i++) {
+ auto logEvent = eventQueue->waitPop();
+ EXPECT_TRUE(logEvent->isValid());
+ EXPECT_EQ(kAtomId + i, logEvent->GetTagId());
+ EXPECT_FALSE(logEvent->isParsedHeaderOnly());
+ }
+
+ for (int i = kEventFilteredCount; i < kEventCount; i++) {
+ auto logEvent = eventQueue->waitPop();
+ EXPECT_TRUE(logEvent->isValid());
+ EXPECT_EQ(kAtomId + i, logEvent->GetTagId());
+ EXPECT_TRUE(logEvent->isParsedHeaderOnly());
+ }
+}
+
+TEST(SocketParseMessageTest, TestProcessMessageFilterToggle) {
+ std::shared_ptr<LogEventQueue> eventQueue =
+ std::make_shared<LogEventQueue>(kEventCount * 3 /*buffer limit*/);
+
+ std::shared_ptr<LogEventFilter> logEventFilter = std::make_shared<LogEventFilter>();
+
+ LogEventFilter::AtomIdSet idsList;
+ for (int i = 0; i < kEventFilteredCount; i++) {
+ idsList.insert(kAtomId + i);
+ }
+ // events with ids from kAtomId to kAtomId + kEventFilteredCount should not be skipped
+ logEventFilter->setAtomIds(idsList, nullptr);
+
+ generateAtomLogging(eventQueue, logEventFilter, kEventCount, kAtomId);
+
+ logEventFilter->setFilteringEnabled(false);
+ // since filtering is disabled, events with any id should not be skipped
+ // will generate events with ids [kAtomId + kEventCount, kAtomId + kEventCount * 2]
+ generateAtomLogging(eventQueue, logEventFilter, kEventCount, kAtomId + kEventCount);
+
+ logEventFilter->setFilteringEnabled(true);
+ LogEventFilter::AtomIdSet idsList2;
+ for (int i = kEventFilteredCount; i < kEventCount; i++) {
+ idsList2.insert(kAtomId + kEventCount * 2 + i);
+ }
+ // events with idsList2 ids should not be skipped
+ logEventFilter->setAtomIds(idsList2, nullptr);
+
+ // will generate events with ids [kAtomId + kEventCount * 2, kAtomId + kEventCount * 3]
+ generateAtomLogging(eventQueue, logEventFilter, kEventCount, kAtomId + kEventCount * 2);
+
+ // check content of the queue
+ EXPECT_EQ(kEventCount * 3, eventQueue->mQueue.size());
+ // events with ids from kAtomId to kAtomId + kEventFilteredCount should not be skipped
+ for (int i = 0; i < kEventFilteredCount; i++) {
+ auto logEvent = eventQueue->waitPop();
+ EXPECT_TRUE(logEvent->isValid());
+ EXPECT_EQ(kAtomId + i, logEvent->GetTagId());
+ EXPECT_FALSE(logEvent->isParsedHeaderOnly());
+ }
+
+ // events with ids from kAtomId + kEventFilteredCount up to kAtomId + kEventCount should be skipped
+ for (int i = kEventFilteredCount; i < kEventCount; i++) {
+ auto logEvent = eventQueue->waitPop();
+ EXPECT_TRUE(logEvent->isValid());
+ EXPECT_EQ(kAtomId + i, logEvent->GetTagId());
+ EXPECT_TRUE(logEvent->isParsedHeaderOnly());
+ }
+
+ // events with ids [kAtomId + kEventCount, kAtomId + kEventCount * 2] should not be skipped
+ // since filtering was disabled at that time
+ for (int i = 0; i < kEventCount; i++) {
+ auto logEvent = eventQueue->waitPop();
+ EXPECT_TRUE(logEvent->isValid());
+ EXPECT_EQ(kAtomId + kEventCount + i, logEvent->GetTagId());
+ EXPECT_FALSE(logEvent->isParsedHeaderOnly());
+ }
+
+ // the first half of the events with ids [kAtomId + kEventCount * 2, kAtomId + kEventCount * 3]
+ // should be skipped
+ for (int i = 0; i < kEventFilteredCount; i++) {
+ auto logEvent = eventQueue->waitPop();
+ EXPECT_TRUE(logEvent->isValid());
+ EXPECT_EQ(kAtomId + kEventCount * 2 + i, logEvent->GetTagId());
+ EXPECT_TRUE(logEvent->isParsedHeaderOnly());
+ }
+
+ // the second half of the events with ids [kAtomId + kEventCount * 2, kAtomId + kEventCount * 3]
+ // should be processed
+ for (int i = kEventFilteredCount; i < kEventCount; i++) {
+ auto logEvent = eventQueue->waitPop();
+ EXPECT_TRUE(logEvent->isValid());
+ EXPECT_EQ(kAtomId + kEventCount * 2 + i, logEvent->GetTagId());
+ EXPECT_FALSE(logEvent->isParsedHeaderOnly());
+ }
+}
+
+// TODO: tests for setAtomIds() with multiple consumers
+// TODO: use MockLogEventFilter to test different sets from different consumers
+
+} // namespace statsd
+} // namespace os
+} // namespace android
+#else
+GTEST_LOG_(INFO) << "This test does nothing.\n";
+#endif
diff --git a/statsd/tests/StatsLogProcessor_test.cpp b/statsd/tests/StatsLogProcessor_test.cpp
index 9697225..d29b5e6 100644
--- a/statsd/tests/StatsLogProcessor_test.cpp
+++ b/statsd/tests/StatsLogProcessor_test.cpp
@@ -15,6 +15,7 @@
#include "StatsLogProcessor.h"
#include <android-base/stringprintf.h>
+#include <android-modules-utils/sdk_level.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <stdio.h>
@@ -26,9 +27,11 @@
#include "packages/UidMap.h"
#include "src/stats_log.pb.h"
#include "src/statsd_config.pb.h"
+#include "state/StateManager.h"
#include "statslog_statsdtest.h"
#include "storage/StorageManager.h"
#include "tests/statsd_test_util.h"
+#include "utils/DbUtils.h"
using namespace android;
using namespace testing;
@@ -40,8 +43,11 @@
namespace statsd {
using android::base::StringPrintf;
+using android::modules::sdklevel::IsAtLeastU;
using android::util::ProtoOutputStream;
+using ::testing::Expectation;
+
#ifdef __ANDROID__
#define STATS_DATA_DIR "/data/misc/stats-data"
@@ -50,20 +56,29 @@
*/
class MockMetricsManager : public MetricsManager {
public:
- MockMetricsManager()
- : MetricsManager(ConfigKey(1, 12345), StatsdConfig(), 1000, 1000, new UidMap(),
+ MockMetricsManager(ConfigKey configKey = ConfigKey(1, 12345))
+ : MetricsManager(configKey, StatsdConfig(), 1000, 1000, new UidMap(),
new StatsPullerManager(),
- new AlarmMonitor(10,
- [](const shared_ptr<IStatsCompanionService>&, int64_t) {},
- [](const shared_ptr<IStatsCompanionService>&) {}),
- new AlarmMonitor(10,
- [](const shared_ptr<IStatsCompanionService>&, int64_t) {},
- [](const shared_ptr<IStatsCompanionService>&) {})) {
+ new AlarmMonitor(
+ 10, [](const shared_ptr<IStatsCompanionService>&, int64_t) {},
+ [](const shared_ptr<IStatsCompanionService>&) {}),
+ new AlarmMonitor(
+ 10, [](const shared_ptr<IStatsCompanionService>&, int64_t) {},
+ [](const shared_ptr<IStatsCompanionService>&) {})) {
}
MOCK_METHOD0(byteSize, size_t());
MOCK_METHOD1(dropData, void(const int64_t dropTimeNs));
+
+ MOCK_METHOD(void, onLogEvent, (const LogEvent& event), (override));
+
+ MOCK_METHOD(void, onDumpReport,
+ (const int64_t dumpTimeNs, const int64_t wallClockNs,
+ const bool include_current_partial_bucket, const bool erase_data,
+ const DumpLatency dumpLatency, std::set<string>* str_set,
+ android::util::ProtoOutputStream* protoOutput),
+ (override));
};
TEST(StatsLogProcessorTest, TestRateLimitByteSize) {
@@ -72,9 +87,11 @@
sp<AlarmMonitor> anomalyAlarmMonitor;
sp<AlarmMonitor> periodicAlarmMonitor;
// Construct the processor with a no-op sendBroadcast function that does nothing.
- StatsLogProcessor p(m, pullerManager, anomalyAlarmMonitor, periodicAlarmMonitor, 0,
- [](const ConfigKey& key) { return true; },
- [](const int&, const vector<int64_t>&) {return true;});
+ StatsLogProcessor p(
+ m, pullerManager, anomalyAlarmMonitor, periodicAlarmMonitor, 0,
+ [](const ConfigKey& key) { return true; },
+ [](const int&, const vector<int64_t>&) { return true; },
+ [](const ConfigKey&, const string&, const vector<int64_t>&) {}, nullptr);
MockMetricsManager mockMetricsManager;
@@ -93,12 +110,14 @@
sp<AlarmMonitor> anomalyAlarmMonitor;
sp<AlarmMonitor> subscriberAlarmMonitor;
int broadcastCount = 0;
- StatsLogProcessor p(m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
- [&broadcastCount](const ConfigKey& key) {
- broadcastCount++;
- return true;
- },
- [](const int&, const vector<int64_t>&) {return true;});
+ StatsLogProcessor p(
+ m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
+ [&broadcastCount](const ConfigKey& key) {
+ broadcastCount++;
+ return true;
+ },
+ [](const int&, const vector<int64_t>&) { return true; },
+ [](const ConfigKey&, const string&, const vector<int64_t>&) {}, nullptr);
MockMetricsManager mockMetricsManager;
@@ -125,12 +144,14 @@
sp<AlarmMonitor> anomalyAlarmMonitor;
sp<AlarmMonitor> subscriberAlarmMonitor;
int broadcastCount = 0;
- StatsLogProcessor p(m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
- [&broadcastCount](const ConfigKey& key) {
- broadcastCount++;
- return true;
- },
- [](const int&, const vector<int64_t>&) {return true;});
+ StatsLogProcessor p(
+ m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
+ [&broadcastCount](const ConfigKey& key) {
+ broadcastCount++;
+ return true;
+ },
+ [](const int&, const vector<int64_t>&) { return true; },
+ [](const ConfigKey&, const string&, const vector<int64_t>&) {}, nullptr);
MockMetricsManager mockMetricsManager;
@@ -161,8 +182,49 @@
return config;
}
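+// Builds a config with a restricted-metrics delegate. Restricted data stays out of normal
+// report dumps and on-disk reports (see the StatsLogProcessorTestRestricted cases below).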
+StatsdConfig makeRestrictedConfig(bool includeMetric = false) {
+ StatsdConfig config;
+ config.add_allowed_log_source("AID_ROOT");
+ config.set_restricted_metrics_delegate_package_name("delegate");
+
+ if (includeMetric) {
+ auto appCrashMatcher = CreateProcessCrashAtomMatcher();
+ *config.add_atom_matcher() = appCrashMatcher;
+ auto eventMetric = config.add_event_metric();
+ eventMetric->set_id(StringToId("EventAppCrashes"));
+ eventMetric->set_what(appCrashMatcher.id());
+ }
+ return config;
+}
+
+class MockRestrictedMetricsManager : public MetricsManager {
+public:
+ MockRestrictedMetricsManager(ConfigKey configKey = ConfigKey(1, 12345))
+ : MetricsManager(configKey, makeRestrictedConfig(), 1000, 1000, new UidMap(),
+ new StatsPullerManager(),
+ new AlarmMonitor(
+ 10, [](const shared_ptr<IStatsCompanionService>&, int64_t) {},
+ [](const shared_ptr<IStatsCompanionService>&) {}),
+ new AlarmMonitor(
+ 10, [](const shared_ptr<IStatsCompanionService>&, int64_t) {},
+ [](const shared_ptr<IStatsCompanionService>&) {})) {
+ }
+
+ MOCK_METHOD(void, onLogEvent, (const LogEvent& event), (override));
+ MOCK_METHOD(void, onDumpReport,
+ (const int64_t dumpTimeNs, const int64_t wallClockNs,
+ const bool include_current_partial_bucket, const bool erase_data,
+ const DumpLatency dumpLatency, std::set<string>* str_set,
+ android::util::ProtoOutputStream* protoOutput),
+ (override));
+ MOCK_METHOD(size_t, byteSize, (), (override));
+ MOCK_METHOD(void, flushRestrictedData, (), (override));
+};
+
TEST(StatsLogProcessorTest, TestUidMapHasSnapshot) {
- // Setup simple config key corresponding to empty config.
+ ConfigKey key(3, 4);
+ StatsdConfig config = MakeConfig(true);
+
sp<UidMap> m = new UidMap();
sp<StatsPullerManager> pullerManager = new StatsPullerManager();
m->updateMap(1, {1, 2}, {1, 2}, {String16("v1"), String16("v2")},
@@ -171,14 +233,21 @@
sp<AlarmMonitor> anomalyAlarmMonitor;
sp<AlarmMonitor> subscriberAlarmMonitor;
int broadcastCount = 0;
- StatsLogProcessor p(m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
- [&broadcastCount](const ConfigKey& key) {
- broadcastCount++;
- return true;
- },
- [](const int&, const vector<int64_t>&) {return true;});
- ConfigKey key(3, 4);
- StatsdConfig config = MakeConfig(true);
+ std::shared_ptr<MockLogEventFilter> mockLogEventFilter = std::make_shared<MockLogEventFilter>();
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(StatsLogProcessor::getDefaultAtomIdSet(), _))
+ .Times(1);
+ StatsLogProcessor p(
+ m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
+ [&broadcastCount](const ConfigKey& key) {
+ broadcastCount++;
+ return true;
+ },
+ [](const int&, const vector<int64_t>&) { return true; },
+ [](const ConfigKey&, const string&, const vector<int64_t>&) {}, mockLogEventFilter);
+
+ const LogEventFilter::AtomIdSet atomIdsList = CreateAtomIdSetFromConfig(config);
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(atomIdsList, &p)).Times(1);
+
p.OnConfigUpdated(0, key, config);
// Expect to get no metrics, but snapshot specified above in uidmap.
@@ -195,6 +264,9 @@
TEST(StatsLogProcessorTest, TestEmptyConfigHasNoUidMap) {
// Setup simple config key corresponding to empty config.
+ ConfigKey key(3, 4);
+ StatsdConfig config = MakeConfig(false);
+
sp<UidMap> m = new UidMap();
sp<StatsPullerManager> pullerManager = new StatsPullerManager();
m->updateMap(1, {1, 2}, {1, 2}, {String16("v1"), String16("v2")},
@@ -203,14 +275,21 @@
sp<AlarmMonitor> anomalyAlarmMonitor;
sp<AlarmMonitor> subscriberAlarmMonitor;
int broadcastCount = 0;
- StatsLogProcessor p(m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
- [&broadcastCount](const ConfigKey& key) {
- broadcastCount++;
- return true;
- },
- [](const int&, const vector<int64_t>&) {return true;});
- ConfigKey key(3, 4);
- StatsdConfig config = MakeConfig(false);
+ std::shared_ptr<MockLogEventFilter> mockLogEventFilter = std::make_shared<MockLogEventFilter>();
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(StatsLogProcessor::getDefaultAtomIdSet(), _))
+ .Times(1);
+ StatsLogProcessor p(
+ m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
+ [&broadcastCount](const ConfigKey& key) {
+ broadcastCount++;
+ return true;
+ },
+ [](const int&, const vector<int64_t>&) { return true; },
+ [](const ConfigKey&, const string&, const vector<int64_t>&) {}, mockLogEventFilter);
+
+ const LogEventFilter::AtomIdSet atomIdsList = CreateAtomIdSetFromConfig(config);
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(atomIdsList, &p)).Times(1);
+
p.OnConfigUpdated(0, key, config);
// Expect to get no metrics, but snapshot specified above in uidmap.
@@ -225,23 +304,33 @@
TEST(StatsLogProcessorTest, TestReportIncludesSubConfig) {
// Setup simple config key corresponding to empty config.
- sp<UidMap> m = new UidMap();
- sp<StatsPullerManager> pullerManager = new StatsPullerManager();
- sp<AlarmMonitor> anomalyAlarmMonitor;
- sp<AlarmMonitor> subscriberAlarmMonitor;
- int broadcastCount = 0;
- StatsLogProcessor p(m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
- [&broadcastCount](const ConfigKey& key) {
- broadcastCount++;
- return true;
- },
- [](const int&, const vector<int64_t>&) {return true;});
ConfigKey key(3, 4);
StatsdConfig config;
auto annotation = config.add_annotation();
annotation->set_field_int64(1);
annotation->set_field_int32(2);
config.add_allowed_log_source("AID_ROOT");
+
+ sp<UidMap> m = new UidMap();
+ sp<StatsPullerManager> pullerManager = new StatsPullerManager();
+ sp<AlarmMonitor> anomalyAlarmMonitor;
+ sp<AlarmMonitor> subscriberAlarmMonitor;
+ int broadcastCount = 0;
+ std::shared_ptr<MockLogEventFilter> mockLogEventFilter = std::make_shared<MockLogEventFilter>();
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(StatsLogProcessor::getDefaultAtomIdSet(), _))
+ .Times(1);
+ StatsLogProcessor p(
+ m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
+ [&broadcastCount](const ConfigKey& key) {
+ broadcastCount++;
+ return true;
+ },
+ [](const int&, const vector<int64_t>&) { return true; },
+ [](const ConfigKey&, const string&, const vector<int64_t>&) {}, mockLogEventFilter);
+
+ const LogEventFilter::AtomIdSet atomIdsList = CreateAtomIdSetFromConfig(config);
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(atomIdsList, &p)).Times(1);
+
p.OnConfigUpdated(1, key, config);
// Expect to get no metrics, but snapshot specified above in uidmap.
@@ -307,16 +396,25 @@
TEST(StatsLogProcessorTest, TestPullUidProviderSetOnConfigUpdate) {
// Setup simple config key corresponding to empty config.
+ ConfigKey key(3, 4);
+ StatsdConfig config = MakeConfig(false);
+
sp<UidMap> m = new UidMap();
sp<StatsPullerManager> pullerManager = new StatsPullerManager();
sp<AlarmMonitor> anomalyAlarmMonitor;
sp<AlarmMonitor> subscriberAlarmMonitor;
+ std::shared_ptr<MockLogEventFilter> mockLogEventFilter = std::make_shared<MockLogEventFilter>();
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(StatsLogProcessor::getDefaultAtomIdSet(), _))
+ .Times(1);
StatsLogProcessor p(
m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
[](const ConfigKey& key) { return true; },
- [](const int&, const vector<int64_t>&) { return true; });
- ConfigKey key(3, 4);
- StatsdConfig config = MakeConfig(false);
+ [](const int&, const vector<int64_t>&) { return true; },
+ [](const ConfigKey&, const string&, const vector<int64_t>&) {}, mockLogEventFilter);
+
+ const LogEventFilter::AtomIdSet atomIdsList = CreateAtomIdSetFromConfig(config);
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(atomIdsList, &p)).Times(3);
+
p.OnConfigUpdated(0, key, config);
EXPECT_NE(pullerManager->mPullUidProviders.find(key), pullerManager->mPullUidProviders.end());
@@ -329,7 +427,9 @@
}
TEST(StatsLogProcessorTest, InvalidConfigRemoved) {
- // Setup simple config key corresponding to empty config.
+ ConfigKey key(3, 4);
+ StatsdConfig config = MakeConfig(true);
+
sp<UidMap> m = new UidMap();
sp<StatsPullerManager> pullerManager = new StatsPullerManager();
m->updateMap(1, {1, 2}, {1, 2}, {String16("v1"), String16("v2")},
@@ -337,11 +437,22 @@
/* certificateHash */ {{}, {}});
sp<AlarmMonitor> anomalyAlarmMonitor;
sp<AlarmMonitor> subscriberAlarmMonitor;
- StatsLogProcessor p(m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
- [](const ConfigKey& key) { return true; },
- [](const int&, const vector<int64_t>&) {return true;});
- ConfigKey key(3, 4);
- StatsdConfig config = MakeConfig(true);
+ std::shared_ptr<MockLogEventFilter> mockLogEventFilter = std::make_shared<MockLogEventFilter>();
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(StatsLogProcessor::getDefaultAtomIdSet(), _))
+ .Times(1);
+ StatsLogProcessor p(
+ m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
+ [](const ConfigKey& key) { return true; },
+ [](const int&, const vector<int64_t>&) { return true; },
+ [](const ConfigKey&, const string&, const vector<int64_t>&) {}, mockLogEventFilter);
+
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(CreateAtomIdSetDefault(), &p)).Times(1);
+    // Atom used by the matcher defined in the MakeConfig() helper.
+ Expectation exp =
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(CreateAtomIdSetFromConfig(config), &p))
+ .Times(1);
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(CreateAtomIdSetDefault(), &p)).Times(1).After(exp);
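+    // .After(exp) makes the ordering explicit: this reset to the default atom set must come
+    // after the config's atom set was applied.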
+
// Remove the config mConfigStats so that the Icebox starts at 0 configs.
p.OnConfigRemoved(key);
StatsdStats::getInstance().reset();
@@ -366,7 +477,6 @@
StorageManager::deleteSuffixedFiles(STATS_DATA_DIR, suffix.c_str());
}
-
TEST(StatsLogProcessorTest, TestActiveConfigMetricDiskWriteRead) {
int uid = 1111;
@@ -464,6 +574,9 @@
long timeBase1 = 1;
int broadcastCount = 0;
+ std::shared_ptr<MockLogEventFilter> mockLogEventFilter = std::make_shared<MockLogEventFilter>();
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(StatsLogProcessor::getDefaultAtomIdSet(), _))
+ .Times(1);
StatsLogProcessor processor(
m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, timeBase1,
[](const ConfigKey& key) { return true; },
@@ -475,7 +588,12 @@
activeConfigsBroadcast.insert(activeConfigsBroadcast.end(), activeConfigs.begin(),
activeConfigs.end());
return true;
- });
+ },
+ [](const ConfigKey&, const string&, const vector<int64_t>&) {}, mockLogEventFilter);
+
+    // config1, config2, and config3 use the same atom.
+ const LogEventFilter::AtomIdSet atomIdsList = CreateAtomIdSetFromConfig(config1);
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(atomIdsList, &processor)).Times(3);
processor.OnConfigUpdated(1, cfgKey1, config1);
processor.OnConfigUpdated(2, cfgKey2, config2);
@@ -1560,7 +1678,9 @@
metric2ActivationTrigger2->set_activation_type(ACTIVATE_IMMEDIATELY);
// Send the config.
- shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
+ const sp<UidMap> uidMap = new UidMap();
+ const shared_ptr<StatsService> service =
+ SharedRefBase::make<StatsService>(uidMap, /* queue */ nullptr, nullptr);
string serialized = config1.SerializeAsString();
service->addConfigurationChecked(uid, configId, {serialized.begin(), serialized.end()});
@@ -1732,6 +1852,34 @@
vector<uint8_t> buffer;
processor->onDumpReport(cfgKey1, configAddedTimeNs + NS_PER_SEC, false, true, ADB_DUMP, FAST,
&buffer);
+
+ service->removeConfiguration(configId, uid);
+ service->mProcessor->onDumpReport(cfgKey1, getElapsedRealtimeNs(),
+ false /* include_current_bucket*/, true /* erase_data */,
+ ADB_DUMP, NO_TIME_CONSTRAINTS, nullptr);
+}
+
+TEST(StatsLogProcessorTest, LogEventFilterOnSetPrintLogs) {
+ sp<UidMap> m = new UidMap();
+ sp<StatsPullerManager> pullerManager = new StatsPullerManager();
+ sp<AlarmMonitor> anomalyAlarmMonitor;
+ sp<AlarmMonitor> periodicAlarmMonitor;
+
+ std::shared_ptr<MockLogEventFilter> mockLogEventFilter = std::make_shared<MockLogEventFilter>();
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(StatsLogProcessor::getDefaultAtomIdSet(), _))
+ .Times(1);
+ StatsLogProcessor p(
+ m, pullerManager, anomalyAlarmMonitor, periodicAlarmMonitor, 0,
+ [](const ConfigKey& key) { return true; },
+ [](const int&, const vector<int64_t>&) { return true; },
+ [](const ConfigKey&, const string&, const vector<int64_t>&) {}, mockLogEventFilter);
+
+ Expectation filterSetFalse =
+ EXPECT_CALL(*mockLogEventFilter, setFilteringEnabled(false)).Times(1);
+ EXPECT_CALL(*mockLogEventFilter, setFilteringEnabled(true)).Times(1).After(filterSetFalse);
+
+ p.setPrintLogs(true);
+ p.setPrintLogs(false);
}
TEST(StatsLogProcessorTest_mapIsolatedUidToHostUid, LogHostUid) {
@@ -1998,7 +2146,201 @@
EXPECT_EQ(output.reports_size(), 1);
EXPECT_EQ(output.reports(0).current_report_elapsed_nanos(), dumpTime3Ns);
EXPECT_EQ(output.reports(0).last_report_elapsed_nanos(), dumpTime1Ns);
+}
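+// Restricted-metrics tests require Android U or later; SetUp()/TearDown() skip on older releases.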
+class StatsLogProcessorTestRestricted : public Test {
+protected:
+ const ConfigKey mConfigKey = ConfigKey(1, 12345);
+ void SetUp() override {
+ if (!IsAtLeastU()) {
+ GTEST_SKIP();
+ }
+ }
+ void TearDown() override {
+ if (!IsAtLeastU()) {
+ GTEST_SKIP();
+ }
+ FlagProvider::getInstance().resetOverrides();
+ StorageManager::deleteAllFiles(STATS_DATA_DIR);
+ dbutils::deleteDb(mConfigKey);
+ }
+};
+
+TEST_F(StatsLogProcessorTestRestricted, TestInconsistentRestrictedMetricsConfigUpdate) {
+ ConfigKey key(3, 4);
+ StatsdConfig config = makeRestrictedConfig(true);
+ config.set_restricted_metrics_delegate_package_name("rm_package");
+
+ sp<UidMap> m = new UidMap();
+ sp<StatsPullerManager> pullerManager = new StatsPullerManager();
+ m->updateMap(1, {1, 2}, {1, 2}, {String16("v1"), String16("v2")},
+ {String16("p1"), String16("p2")}, {String16(""), String16("")},
+ /* certificateHash */ {{}, {}});
+ sp<AlarmMonitor> anomalyAlarmMonitor;
+ sp<AlarmMonitor> subscriberAlarmMonitor;
+ std::shared_ptr<MockLogEventFilter> mockLogEventFilter = std::make_shared<MockLogEventFilter>();
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(StatsLogProcessor::getDefaultAtomIdSet(), _))
+ .Times(1);
+ StatsLogProcessor p(
+ m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
+ [](const ConfigKey& key) { return true; },
+ [](const int&, const vector<int64_t>&) { return true; },
+ [](const ConfigKey&, const string&, const vector<int64_t>&) {}, mockLogEventFilter);
+
+    // newConfig (below) contains the same atoms as config, so the same atom ID set is expected twice.
+ const LogEventFilter::AtomIdSet atomIdsList = CreateAtomIdSetFromConfig(config);
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(atomIdsList, &p)).Times(2);
+
+ p.OnConfigUpdated(0, key, config);
+
+ EXPECT_EQ(1, p.mMetricsManagers.size());
+ EXPECT_NE(p.mMetricsManagers.find(key), p.mMetricsManagers.end());
+ sp<MetricsManager> oldMetricsManager = p.mMetricsManagers.find(key)->second;
+
+ StatsdConfig newConfig = makeRestrictedConfig(true);
+ newConfig.clear_restricted_metrics_delegate_package_name();
+ p.OnConfigUpdated(/*timestampNs=*/0, key, newConfig);
+
+ ASSERT_NE(p.mMetricsManagers.find(key)->second, oldMetricsManager);
+}
+
+TEST_F(StatsLogProcessorTestRestricted, TestRestrictedLogEventNotPassed) {
+ sp<StatsLogProcessor> processor = CreateStatsLogProcessor(
+ /*timeBaseNs=*/1, /*currentTimeNs=*/1, StatsdConfig(), mConfigKey);
+ ConfigKey key(3, 4);
+ sp<MockMetricsManager> metricsManager = new MockMetricsManager(mConfigKey);
+ EXPECT_CALL(*metricsManager, onLogEvent).Times(0);
+
+ processor->mMetricsManagers[mConfigKey] = metricsManager;
+ EXPECT_FALSE(processor->mMetricsManagers[mConfigKey]->hasRestrictedMetricsDelegate());
+
+ unique_ptr<LogEvent> event = CreateRestrictedLogEvent(123);
+ EXPECT_TRUE(event->isValid());
+ EXPECT_TRUE(event->isRestricted());
+ processor->OnLogEvent(event.get());
+}
+
+TEST_F(StatsLogProcessorTestRestricted, TestRestrictedLogEventPassed) {
+ sp<StatsLogProcessor> processor = CreateStatsLogProcessor(
+ /*timeBaseNs=*/1, /*currentTimeNs=*/1, StatsdConfig(), mConfigKey);
+ sp<MockRestrictedMetricsManager> metricsManager = new MockRestrictedMetricsManager(mConfigKey);
+ EXPECT_CALL(*metricsManager, onLogEvent).Times(1);
+
+ processor->mMetricsManagers[mConfigKey] = metricsManager;
+ EXPECT_TRUE(processor->mMetricsManagers[mConfigKey]->hasRestrictedMetricsDelegate());
+
+ unique_ptr<LogEvent> event = CreateRestrictedLogEvent(123);
+ EXPECT_TRUE(event->isValid());
+ EXPECT_TRUE(event->isRestricted());
+ processor->OnLogEvent(event.get());
+}
+
+TEST_F(StatsLogProcessorTestRestricted, RestrictedMetricsManagerOnDumpReportNotCalled) {
+ sp<StatsLogProcessor> processor = CreateStatsLogProcessor(
+ /*timeBaseNs=*/1, /*currentTimeNs=*/1, makeRestrictedConfig(/*includeMetric=*/true),
+ mConfigKey);
+ sp<MockRestrictedMetricsManager> metricsManager = new MockRestrictedMetricsManager(mConfigKey);
+ EXPECT_CALL(*metricsManager, onDumpReport).Times(0);
+
+ processor->mMetricsManagers[mConfigKey] = metricsManager;
+ EXPECT_TRUE(processor->mMetricsManagers[mConfigKey]->hasRestrictedMetricsDelegate());
+
+ vector<uint8_t> buffer;
+ processor->onConfigMetricsReportLocked(mConfigKey, /*dumpTimeStampNs=*/1, /*wallClockNs=*/0,
+ /*include_current_partial_bucket=*/true,
+ /*erase_data=*/true, GET_DATA_CALLED, FAST,
+ /*dataSavedToDisk=*/true, &buffer);
+}
+
+TEST_F(StatsLogProcessorTestRestricted, RestrictedMetricFlushIfReachMemoryLimit) {
+ sp<StatsLogProcessor> processor = CreateStatsLogProcessor(
+ /*timeBaseNs=*/1, /*currentTimeNs=*/1, makeRestrictedConfig(/*includeMetric=*/true),
+ mConfigKey);
+ sp<MockRestrictedMetricsManager> metricsManager = new MockRestrictedMetricsManager(mConfigKey);
+ EXPECT_CALL(*metricsManager, flushRestrictedData).Times(1);
+ EXPECT_CALL(*metricsManager, byteSize)
+ .Times(1)
+ .WillOnce(Return(StatsdStats::kBytesPerRestrictedConfigTriggerFlush + 1));
+
+ processor->mMetricsManagers[mConfigKey] = metricsManager;
+ EXPECT_TRUE(processor->mMetricsManagers[mConfigKey]->hasRestrictedMetricsDelegate());
+
+ processor->flushIfNecessaryLocked(mConfigKey, *metricsManager);
+}
+
+TEST_F(StatsLogProcessorTestRestricted, RestrictedMetricNotFlushIfNotReachMemoryLimit) {
+ sp<StatsLogProcessor> processor = CreateStatsLogProcessor(
+ /*timeBaseNs=*/1, /*currentTimeNs=*/1, makeRestrictedConfig(/*includeMetric=*/true),
+ mConfigKey);
+ sp<MockRestrictedMetricsManager> metricsManager = new MockRestrictedMetricsManager(mConfigKey);
+ EXPECT_CALL(*metricsManager, flushRestrictedData).Times(0);
+ EXPECT_CALL(*metricsManager, byteSize)
+ .Times(1)
+ .WillOnce(Return(StatsdStats::kBytesPerRestrictedConfigTriggerFlush - 1));
+
+ processor->mMetricsManagers[mConfigKey] = metricsManager;
+ EXPECT_TRUE(processor->mMetricsManagers[mConfigKey]->hasRestrictedMetricsDelegate());
+
+ processor->flushIfNecessaryLocked(mConfigKey, *metricsManager);
+}
+
+TEST_F(StatsLogProcessorTestRestricted, NonRestrictedMetricsManagerOnDumpReportCalled) {
+ sp<StatsLogProcessor> processor = CreateStatsLogProcessor(
+ /*timeBaseNs=*/1, /*currentTimeNs=*/1, MakeConfig(/*includeMetric=*/true), mConfigKey);
+ sp<MockMetricsManager> metricsManager = new MockMetricsManager(mConfigKey);
+ EXPECT_CALL(*metricsManager, onDumpReport).Times(1);
+
+ processor->mMetricsManagers[mConfigKey] = metricsManager;
+ EXPECT_FALSE(processor->mMetricsManagers[mConfigKey]->hasRestrictedMetricsDelegate());
+
+ vector<uint8_t> buffer;
+ processor->onConfigMetricsReportLocked(mConfigKey, /*dumpTimeStampNs=*/1, /*wallClockNs=*/0,
+ /*include_current_partial_bucket=*/true,
+ /*erase_data=*/true, GET_DATA_CALLED, FAST,
+ /*dataSavedToDisk=*/true, &buffer);
+}
+
+TEST_F(StatsLogProcessorTestRestricted, RestrictedMetricOnDumpReportEmpty) {
+ sp<StatsLogProcessor> processor = CreateStatsLogProcessor(
+ /*timeBaseNs=*/1, /*currentTimeNs=*/1, makeRestrictedConfig(/*includeMetric=*/true),
+ mConfigKey);
+ ProtoOutputStream proto;
+ processor->onDumpReport(mConfigKey, /*dumpTimeStampNs=*/1, /*wallClockNs=*/2,
+ /*include_current_partial_bucket=*/true, /*erase_data=*/true,
+ DEVICE_SHUTDOWN, FAST, &proto);
+ ASSERT_EQ(proto.size(), 0);
+}
+
+TEST_F(StatsLogProcessorTestRestricted, NonRestrictedMetricOnDumpReportNotEmpty) {
+ sp<StatsLogProcessor> processor = CreateStatsLogProcessor(
+ /*timeBaseNs=*/1, /*currentTimeNs=*/1, MakeConfig(/*includeMetric=*/true), mConfigKey);
+
+ ProtoOutputStream proto;
+ processor->onDumpReport(mConfigKey, /*dumpTimeStampNs=*/1, /*wallClockNs=*/2,
+ /*include_current_partial_bucket=*/true, /*erase_data=*/true,
+ DEVICE_SHUTDOWN, FAST, &proto);
+ ASSERT_NE(proto.size(), 0);
+}
+
+TEST_F(StatsLogProcessorTestRestricted, RestrictedMetricNotWriteToDisk) {
+ sp<StatsLogProcessor> processor = CreateStatsLogProcessor(
+ /*timeBaseNs=*/1, /*currentTimeNs=*/1, makeRestrictedConfig(/*includeMetric=*/true),
+ mConfigKey);
+
+ processor->WriteDataToDiskLocked(mConfigKey, /*timestampNs=*/0, /*wallClockNs=*/0,
+ CONFIG_UPDATED, FAST);
+
+ ASSERT_FALSE(StorageManager::hasConfigMetricsReport(mConfigKey));
+}
+
+TEST_F(StatsLogProcessorTestRestricted, NonRestrictedMetricWriteToDisk) {
+ sp<StatsLogProcessor> processor = CreateStatsLogProcessor(
+ /*timeBaseNs=*/1, /*currentTimeNs=*/1, MakeConfig(true), mConfigKey);
+
+ processor->WriteDataToDiskLocked(mConfigKey, /*timestampNs=*/0, /*wallClockNs=*/0,
+ CONFIG_UPDATED, FAST);
+
+ ASSERT_TRUE(StorageManager::hasConfigMetricsReport(mConfigKey));
}
#else
diff --git a/statsd/tests/StatsService_test.cpp b/statsd/tests/StatsService_test.cpp
index 9d01b0c..ed964a3 100644
--- a/statsd/tests/StatsService_test.cpp
+++ b/statsd/tests/StatsService_test.cpp
@@ -13,15 +13,17 @@
// limitations under the License.
#include "StatsService.h"
-#include "config/ConfigKey.h"
-#include "src/statsd_config.pb.h"
#include <android/binder_interface_utils.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
-
#include <stdio.h>
+#include "config/ConfigKey.h"
+#include "packages/UidMap.h"
+#include "src/statsd_config.pb.h"
+#include "tests/statsd_test_util.h"
+
using namespace android;
using namespace testing;
@@ -29,31 +31,80 @@
namespace os {
namespace statsd {
+using android::modules::sdklevel::IsAtLeastU;
using android::util::ProtoOutputStream;
using ::ndk::SharedRefBase;
#ifdef __ANDROID__
-TEST(StatsServiceTest, TestAddConfig_simple) {
- shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
+namespace {
+
+const int64_t metricId = 123456;
+const int32_t ATOM_TAG = util::SUBSYSTEM_SLEEP_STATE;
+
+StatsdConfig CreateStatsdConfig(const GaugeMetric::SamplingType samplingType) {
StatsdConfig config;
- config.set_id(12345);
+ config.add_allowed_log_source("AID_ROOT"); // LogEvent defaults to UID of root.
+ config.add_default_pull_packages("AID_ROOT"); // Fake puller is registered with root.
+ auto atomMatcher = CreateSimpleAtomMatcher("TestMatcher", ATOM_TAG);
+ *config.add_atom_matcher() = atomMatcher;
+ *config.add_gauge_metric() =
+ createGaugeMetric("GAUGE1", atomMatcher.id(), samplingType, nullopt, nullopt);
+ config.set_hash_strings_in_metric_report(false);
+ return config;
+}
+
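+// Records the elapsed time of the most recent pull so the test can check pull timing.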
+class FakeSubsystemSleepCallbackWithTiming : public FakeSubsystemSleepCallback {
+public:
+ Status onPullAtom(int atomTag,
+ const shared_ptr<IPullAtomResultReceiver>& resultReceiver) override {
+ mPullTimeNs = getElapsedRealtimeNs();
+ return FakeSubsystemSleepCallback::onPullAtom(atomTag, resultReceiver);
+ }
+ int64_t mPullTimeNs = 0;
+};
+
+} // namespace
+
+TEST(StatsServiceTest, TestAddConfig_simple) {
+ const sp<UidMap> uidMap = new UidMap();
+ shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(
+ uidMap, /* queue */ nullptr, /* LogEventFilter */ nullptr);
+ const int kConfigKey = 12345;
+ const int kCallingUid = 123;
+ StatsdConfig config;
+ config.set_id(kConfigKey);
string serialized = config.SerializeAsString();
- EXPECT_TRUE(
- service->addConfigurationChecked(123, 12345, {serialized.begin(), serialized.end()}));
+ EXPECT_TRUE(service->addConfigurationChecked(kCallingUid, kConfigKey,
+ {serialized.begin(), serialized.end()}));
+ service->removeConfiguration(kConfigKey, kCallingUid);
+ ConfigKey configKey(kCallingUid, kConfigKey);
+ service->mProcessor->onDumpReport(configKey, getElapsedRealtimeNs(),
+ false /* include_current_bucket*/, true /* erase_data */,
+ ADB_DUMP, NO_TIME_CONSTRAINTS, nullptr);
}
TEST(StatsServiceTest, TestAddConfig_empty) {
- shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
+ const sp<UidMap> uidMap = new UidMap();
+ shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(
+ uidMap, /* queue */ nullptr, /* LogEventFilter */ nullptr);
string serialized = "";
-
- EXPECT_TRUE(
- service->addConfigurationChecked(123, 12345, {serialized.begin(), serialized.end()}));
+ const int kConfigKey = 12345;
+ const int kCallingUid = 123;
+ EXPECT_TRUE(service->addConfigurationChecked(kCallingUid, kConfigKey,
+ {serialized.begin(), serialized.end()}));
+ service->removeConfiguration(kConfigKey, kCallingUid);
+ ConfigKey configKey(kCallingUid, kConfigKey);
+ service->mProcessor->onDumpReport(configKey, getElapsedRealtimeNs(),
+ false /* include_current_bucket*/, true /* erase_data */,
+ ADB_DUMP, NO_TIME_CONSTRAINTS, nullptr);
}
TEST(StatsServiceTest, TestAddConfig_invalid) {
- shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
+ const sp<UidMap> uidMap = new UidMap();
+ shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(
+ uidMap, /* queue */ nullptr, /* LogEventFilter */ nullptr);
string serialized = "Invalid config!";
EXPECT_FALSE(
@@ -70,7 +121,9 @@
int32_t uid;
- shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
+ const sp<UidMap> uidMap = new UidMap();
+ shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(
+ uidMap, /* queue */ nullptr, /* LogEventFilter */ nullptr);
service->mEngBuild = true;
// "-1"
@@ -95,6 +148,114 @@
EXPECT_FALSE(service->getUidFromArgs(args, 2, uid));
}
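+// Parameterized on the statsd init delay: true uses initEventDelaySecs = 0 ("NoDelay"),
+// false uses a 3-second delay ("WithDelay").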
+class StatsServiceStatsdInitTest : public StatsServiceConfigTest,
+ public testing::WithParamInterface<bool> {
+public:
+ StatsServiceStatsdInitTest() : kInitDelaySec(GetParam() ? 0 : 3) {
+ }
+
+ static std::string ToString(testing::TestParamInfo<bool> info) {
+ return info.param ? "NoDelay" : "WithDelay";
+ }
+
+protected:
+ const int kInitDelaySec = 0;
+
+ shared_ptr<StatsService> createStatsService() override {
+ return SharedRefBase::make<StatsService>(new UidMap(), /*queue=*/nullptr,
+ /*LogEventFilter=*/nullptr,
+ /*initEventDelaySecs=*/kInitDelaySec);
+ }
+};
+
+INSTANTIATE_TEST_SUITE_P(StatsServiceStatsdInitTest, StatsServiceStatsdInitTest, testing::Bool(),
+ StatsServiceStatsdInitTest::ToString);
+
+TEST_P(StatsServiceStatsdInitTest, StatsServiceStatsdInitTest) {
+    // Error tolerance threshold, since sleep() is involved in the timing checks below.
+ const int64_t ERROR_THRESHOLD_NS = GetParam() ? 1000000 : 5 * 1000000;
+
+ auto pullAtomCallback = SharedRefBase::make<FakeSubsystemSleepCallbackWithTiming>();
+
+    // TODO: evaluate using the service->registerNativePullAtomCallback() API
+ service->mPullerManager->RegisterPullAtomCallback(/*uid=*/0, ATOM_TAG, NS_PER_SEC,
+ NS_PER_SEC * 10, {}, pullAtomCallback);
+
+ const int64_t createConfigTimeNs = getElapsedRealtimeNs();
+ StatsdConfig config = CreateStatsdConfig(GaugeMetric::RANDOM_ONE_SAMPLE);
+ config.set_id(kConfigKey);
+ ASSERT_TRUE(sendConfig(config));
+ ASSERT_EQ(2, pullAtomCallback->pullNum);
+
+ service->mProcessor->mPullerManager->ForceClearPullerCache();
+
+ const int64_t initCompletedTimeNs = getElapsedRealtimeNs();
+ service->onStatsdInitCompleted();
+ ASSERT_EQ(3, pullAtomCallback->pullNum);
+
+    // Check that the pull happened with or without delay, according to the flag value.
+ const int64_t lastPullNs = pullAtomCallback->mPullTimeNs;
+
+ if (GetParam()) {
+        // When the flag is set, there should be only a small delay between init and pull,
+        // within the error threshold.
+ EXPECT_GE(lastPullNs, initCompletedTimeNs);
+ EXPECT_LE(lastPullNs, initCompletedTimeNs + ERROR_THRESHOLD_NS);
+ } else {
+        // When the flag is not set, a longer delay of kInitDelaySec is expected.
+ EXPECT_GE(lastPullNs, initCompletedTimeNs + kInitDelaySec * NS_PER_SEC);
+ EXPECT_LE(lastPullNs,
+ initCompletedTimeNs + kInitDelaySec * NS_PER_SEC + ERROR_THRESHOLD_NS);
+ }
+
+ const int64_t bucketSizeNs =
+ TimeUnitToBucketSizeInMillis(config.gauge_metric(0).bucket()) * 1000000;
+ const int64_t dumpReportTsNanos = createConfigTimeNs + bucketSizeNs + NS_PER_SEC;
+
+ vector<uint8_t> output;
+ ConfigKey configKey(kCallingUid, kConfigKey);
+ service->mProcessor->onDumpReport(configKey, dumpReportTsNanos,
+ /*include_current_bucket=*/false, /*erase_data=*/true,
+ ADB_DUMP, FAST, &output);
+ ConfigMetricsReportList reports;
+ reports.ParseFromArray(output.data(), output.size());
+ ASSERT_EQ(1, reports.reports_size());
+
+ backfillDimensionPath(&reports);
+ backfillStartEndTimestamp(&reports);
+ backfillAggregatedAtoms(&reports);
+ StatsLogReport::GaugeMetricDataWrapper gaugeMetrics =
+ reports.reports(0).metrics(0).gauge_metrics();
+ ASSERT_EQ(gaugeMetrics.skipped_size(), 0);
+ ASSERT_GT((int)gaugeMetrics.data_size(), 0);
+ const auto data = gaugeMetrics.data(0);
+ ASSERT_EQ(2, data.bucket_info_size());
+
+ const auto bucketInfo0 = data.bucket_info(0);
+ const auto bucketInfo1 = data.bucket_info(1);
+
+ EXPECT_GE(NanoToMillis(bucketInfo0.start_bucket_elapsed_nanos()),
+ NanoToMillis(createConfigTimeNs));
+ EXPECT_LE(NanoToMillis(bucketInfo0.start_bucket_elapsed_nanos()),
+ NanoToMillis(createConfigTimeNs + ERROR_THRESHOLD_NS));
+
+ EXPECT_EQ(NanoToMillis(bucketInfo0.end_bucket_elapsed_nanos()),
+ NanoToMillis(bucketInfo1.start_bucket_elapsed_nanos()));
+
+ ASSERT_EQ(1, bucketInfo1.atom_size());
+ ASSERT_GT(bucketInfo1.atom(0).subsystem_sleep_state().time_millis(), 0);
+
+ EXPECT_GE(NanoToMillis(bucketInfo1.start_bucket_elapsed_nanos()),
+ NanoToMillis(createConfigTimeNs + kInitDelaySec * NS_PER_SEC));
+ EXPECT_LE(NanoToMillis(bucketInfo1.start_bucket_elapsed_nanos()),
+ NanoToMillis(createConfigTimeNs + kInitDelaySec * NS_PER_SEC + ERROR_THRESHOLD_NS));
+
+ EXPECT_GE(NanoToMillis(createConfigTimeNs + bucketSizeNs),
+ NanoToMillis(bucketInfo1.end_bucket_elapsed_nanos()));
+ EXPECT_LE(NanoToMillis(createConfigTimeNs + bucketSizeNs),
+ NanoToMillis(bucketInfo1.end_bucket_elapsed_nanos() + ERROR_THRESHOLD_NS));
+}
+
#else
GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif
diff --git a/statsd/tests/UidMap_test.cpp b/statsd/tests/UidMap_test.cpp
index 25305f7..6d3dd7e 100644
--- a/statsd/tests/UidMap_test.cpp
+++ b/statsd/tests/UidMap_test.cpp
@@ -13,19 +13,22 @@
// limitations under the License.
#include "packages/UidMap.h"
-#include "StatsLogProcessor.h"
-#include "config/ConfigKey.h"
-#include "guardrail/StatsdStats.h"
-#include "logd/LogEvent.h"
-#include "hash.h"
-#include "statslog_statsdtest.h"
-#include "statsd_test_util.h"
#include <android/util/ProtoOutputStream.h>
#include <gtest/gtest.h>
-
+#include <src/uid_data.pb.h>
#include <stdio.h>
+#include "StatsLogProcessor.h"
+#include "StatsService.h"
+#include "config/ConfigKey.h"
+#include "gtest_matchers.h"
+#include "guardrail/StatsdStats.h"
+#include "hash.h"
+#include "logd/LogEvent.h"
+#include "statsd_test_util.h"
+#include "statslog_statsdtest.h"
+
using namespace android;
namespace android {
@@ -34,10 +37,67 @@
using android::util::ProtoOutputStream;
using android::util::ProtoReader;
+using ::ndk::SharedRefBase;
#ifdef __ANDROID__
+
+namespace {
const string kApp1 = "app1.sharing.1";
const string kApp2 = "app2.sharing.1";
+const string kApp3 = "app3";
+
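+// Baseline package data shared by the UidMap tests below.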
+const vector<int32_t> kUids{1000, 1000, 1500};
+const vector<int64_t> kVersions{4, 5, 6};
+const vector<string> kVersionStrings{"v1", "v1", "v2"};
+const vector<string> kApps{kApp1, kApp2, kApp3};
+const vector<string> kInstallers{"", "", "com.android.vending"};
+const vector<vector<uint8_t>> kCertificateHashes{{'a', 'z'}, {'b', 'c'}, {'d', 'e'}};
+const vector<bool> kDeleted(3, false);
+
+void sendPackagesToStatsd(shared_ptr<StatsService> service, const vector<int32_t>& uids,
+ const vector<int64_t>& versions, const vector<string>& versionStrings,
+ const vector<string>& apps, const vector<string>& installers,
+ const vector<vector<uint8_t>>& certificateHashes) {
+ // Populate UidData from app data.
+ UidData uidData;
+ for (size_t i = 0; i < uids.size(); i++) {
+ ApplicationInfo* appInfo = uidData.add_app_info();
+ appInfo->set_uid(uids[i]);
+ appInfo->set_version(versions[i]);
+ appInfo->set_version_string(versionStrings[i]);
+ appInfo->set_package_name(apps[i]);
+ appInfo->set_installer(installers[i]);
+ appInfo->set_certificate_hash(certificateHashes[i].data(), certificateHashes[i].size());
+ }
+
+ // Create file descriptor from serialized UidData.
+ // Create a file that lives in memory.
+ ScopedFileDescriptor scopedFd(memfd_create("doesn't matter", MFD_CLOEXEC));
+ const int fd = scopedFd.get();
+ int f = fcntl(fd, F_GETFD); // Read the file descriptor flags.
+ ASSERT_NE(-1, f); // Ensure there was no error while reading file descriptor flags.
+ ASSERT_TRUE(f & FD_CLOEXEC);
+ ASSERT_TRUE(uidData.SerializeToFileDescriptor(fd));
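+    // Rewind the fd so statsd reads the serialized proto from the beginning.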
+ ASSERT_EQ(0, lseek(fd, 0, SEEK_SET));
+
+ // Send file descriptor containing app data to statsd.
+ service->informAllUidData(scopedFd);
+}
+
+// Returns a vector of the same length as the values param. The i-th element of the returned
+// vector is the index at which values[i] appears in the range denoted by the begin and end
+// iterators.
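+// Example (illustrative): for a range {10, 20, 30} and values {30, 10}, the result is {2, 0}.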
+template <typename Iterator, typename ValueType>
+vector<uint32_t> computeIndices(const Iterator begin, const Iterator end,
+ const vector<ValueType>& values) {
+ vector<uint32_t> indices;
+ for (const ValueType& value : values) {
+ Iterator it = find(begin, end, value);
+ indices.emplace_back(distance(begin, it));
+ }
+ return indices;
+}
+
+} // anonymous namespace
TEST(UidMapTest, TestIsolatedUID) {
sp<UidMap> m = new UidMap();
@@ -48,7 +108,8 @@
StatsLogProcessor p(
m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, 0,
[](const ConfigKey& key) { return true; },
- [](const int&, const vector<int64_t>&) { return true; });
+ [](const int&, const vector<int64_t>&) { return true; },
+ [](const ConfigKey&, const string&, const vector<int64_t>&) {}, nullptr);
std::unique_ptr<LogEvent> addEvent = CreateIsolatedUidChangedEvent(
1 /*timestamp*/, 100 /*hostUid*/, 101 /*isolatedUid*/, 1 /*is_create*/);
@@ -62,114 +123,191 @@
EXPECT_EQ(101, m->getHostUidOrSelf(101));
}
-TEST(UidMapTest, TestMatching) {
- UidMap m;
- const vector<int32_t> uids{1000, 1000};
- const vector<int64_t> versions{4, 5};
- const vector<String16> versionStrings{String16("v1"), String16("v1")};
- const vector<String16> apps{String16(kApp1.c_str()), String16(kApp2.c_str())};
- const vector<String16> installers{String16(""), String16("")};
- const vector<vector<uint8_t>> certificateHashes{{}, {}};
+TEST(UidMapTest, TestUpdateMap) {
+ const sp<UidMap> uidMap = new UidMap();
+ const shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(
+ uidMap, /* queue */ nullptr, /* LogEventFilter */ nullptr);
+ sendPackagesToStatsd(service, kUids, kVersions, kVersionStrings, kApps, kInstallers,
+ kCertificateHashes);
- m.updateMap(1 /* timestamp */, uids, versions, versionStrings, apps, installers,
- certificateHashes);
- EXPECT_TRUE(m.hasApp(1000, kApp1));
- EXPECT_TRUE(m.hasApp(1000, kApp2));
- EXPECT_FALSE(m.hasApp(1000, "not.app"));
+ EXPECT_TRUE(uidMap->hasApp(1000, kApp1));
+ EXPECT_TRUE(uidMap->hasApp(1000, kApp2));
+ EXPECT_TRUE(uidMap->hasApp(1500, kApp3));
+ EXPECT_FALSE(uidMap->hasApp(1000, "not.app"));
- std::set<string> name_set = m.getAppNamesFromUid(1000u, true /* returnNormalized */);
- ASSERT_EQ(name_set.size(), 2u);
- EXPECT_TRUE(name_set.find(kApp1) != name_set.end());
- EXPECT_TRUE(name_set.find(kApp2) != name_set.end());
+ std::set<string> name_set = uidMap->getAppNamesFromUid(1000u, true /* returnNormalized */);
+ EXPECT_THAT(name_set, UnorderedElementsAre(kApp1, kApp2));
- name_set = m.getAppNamesFromUid(12345, true /* returnNormalized */);
- EXPECT_TRUE(name_set.empty());
+ name_set = uidMap->getAppNamesFromUid(1500u, true /* returnNormalized */);
+ EXPECT_THAT(name_set, UnorderedElementsAre(kApp3));
+
+ name_set = uidMap->getAppNamesFromUid(12345, true /* returnNormalized */);
+ EXPECT_THAT(name_set, IsEmpty());
+
+ vector<PackageInfo> expectedPackageInfos =
+ buildPackageInfos(kApps, kUids, kVersions, kVersionStrings, kInstallers,
+ kCertificateHashes, kDeleted, /* installerIndices */ {},
+ /* hashStrings */ false);
+
+ PackageInfoSnapshot packageInfoSnapshot = getPackageInfoSnapshot(uidMap);
+
+ EXPECT_THAT(packageInfoSnapshot.package_info(),
+ UnorderedPointwise(EqPackageInfo(), expectedPackageInfos));
}
-TEST(UidMapTest, TestAddAndRemove) {
- UidMap m;
- const vector<int32_t> uids{1000, 1000};
- const vector<int64_t> versions{4, 5};
- const vector<String16> versionStrings{String16("v1"), String16("v1")};
- const vector<String16> apps{String16(kApp1.c_str()), String16(kApp2.c_str())};
- const vector<String16> installers{String16(""), String16("")};
- const vector<vector<uint8_t>> certificateHashes{{}, {}};
+TEST(UidMapTest, TestUpdateMapMultiple) {
+ const sp<UidMap> uidMap = new UidMap();
+ const shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(
+ uidMap, /* queue */ nullptr, /* LogEventFilter */ nullptr);
+ sendPackagesToStatsd(service, kUids, kVersions, kVersionStrings, kApps, kInstallers,
+ kCertificateHashes);
- m.updateMap(1 /* timestamp */, uids, versions, versionStrings, apps, installers,
- certificateHashes);
+ // Remove kApp3, and add NewApp
+ vector<int32_t> uids(kUids);
+ uids.back() = 2000;
+ vector<string> apps(kApps);
+ apps.back() = "NewApp";
+ vector<string> installers(kInstallers);
+ installers.back() = "NewInstaller";
- std::set<string> name_set = m.getAppNamesFromUid(1000, true /* returnNormalized */);
- ASSERT_EQ(name_set.size(), 2u);
- EXPECT_TRUE(name_set.find(kApp1) != name_set.end());
- EXPECT_TRUE(name_set.find(kApp2) != name_set.end());
+ sendPackagesToStatsd(service, uids, kVersions, kVersionStrings, apps, installers,
+ kCertificateHashes);
- // Update the app1 version.
- m.updateApp(2, String16(kApp1.c_str()), 1000, 40, String16("v40"), String16(""),
- /* certificateHash */ {});
- EXPECT_EQ(40, m.getAppVersion(1000, kApp1));
+ EXPECT_TRUE(uidMap->hasApp(1000, kApp1));
+ EXPECT_TRUE(uidMap->hasApp(1000, kApp2));
+ EXPECT_TRUE(uidMap->hasApp(2000, "NewApp"));
+ EXPECT_FALSE(uidMap->hasApp(1500, kApp3));
+ EXPECT_FALSE(uidMap->hasApp(1000, "not.app"));
- name_set = m.getAppNamesFromUid(1000, true /* returnNormalized */);
- ASSERT_EQ(name_set.size(), 2u);
- EXPECT_TRUE(name_set.find(kApp1) != name_set.end());
- EXPECT_TRUE(name_set.find(kApp2) != name_set.end());
+ std::set<string> name_set = uidMap->getAppNamesFromUid(1000u, true /* returnNormalized */);
+ EXPECT_THAT(name_set, UnorderedElementsAre(kApp1, kApp2));
- m.removeApp(3, String16(kApp1.c_str()), 1000);
- EXPECT_FALSE(m.hasApp(1000, kApp1));
- EXPECT_TRUE(m.hasApp(1000, kApp2));
- name_set = m.getAppNamesFromUid(1000, true /* returnNormalized */);
- ASSERT_EQ(name_set.size(), 1u);
- EXPECT_TRUE(name_set.find(kApp1) == name_set.end());
- EXPECT_TRUE(name_set.find(kApp2) != name_set.end());
+ name_set = uidMap->getAppNamesFromUid(2000, true /* returnNormalized */);
+ EXPECT_THAT(name_set, UnorderedElementsAre("newapp"));
- // Remove app2.
- m.removeApp(4, String16(kApp2.c_str()), 1000);
- EXPECT_FALSE(m.hasApp(1000, kApp1));
- EXPECT_FALSE(m.hasApp(1000, kApp2));
- name_set = m.getAppNamesFromUid(1000, true /* returnNormalized */);
- EXPECT_TRUE(name_set.empty());
+ name_set = uidMap->getAppNamesFromUid(1500, true /* returnNormalized */);
+ EXPECT_THAT(name_set, IsEmpty());
+
+ vector<PackageInfo> expectedPackageInfos =
+ buildPackageInfos(apps, uids, kVersions, kVersionStrings, installers,
+ kCertificateHashes, kDeleted, /* installerIndices */ {},
+ /* hashStrings */ false);
+
+ PackageInfoSnapshot packageInfoSnapshot = getPackageInfoSnapshot(uidMap);
+
+ EXPECT_THAT(packageInfoSnapshot.package_info(),
+ UnorderedPointwise(EqPackageInfo(), expectedPackageInfos));
+}
+
+TEST(UidMapTest, TestRemoveApp) {
+ const sp<UidMap> uidMap = new UidMap();
+ const shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(
+ uidMap, /* queue */ nullptr, /* LogEventFilter */ nullptr);
+ sendPackagesToStatsd(service, kUids, kVersions, kVersionStrings, kApps, kInstallers,
+ kCertificateHashes);
+
+ service->informOnePackageRemoved(kApp1, 1000);
+ EXPECT_FALSE(uidMap->hasApp(1000, kApp1));
+ EXPECT_TRUE(uidMap->hasApp(1000, kApp2));
+ EXPECT_TRUE(uidMap->hasApp(1500, kApp3));
+ std::set<string> name_set = uidMap->getAppNamesFromUid(1000, true /* returnNormalized */);
+ EXPECT_THAT(name_set, UnorderedElementsAre(kApp2));
+
+ vector<bool> deleted(kDeleted);
+ deleted[0] = true;
+ vector<PackageInfo> expectedPackageInfos =
+ buildPackageInfos(kApps, kUids, kVersions, kVersionStrings, kInstallers,
+ kCertificateHashes, deleted, /* installerIndices */ {},
+ /* hashStrings */ false);
+ PackageInfoSnapshot packageInfoSnapshot = getPackageInfoSnapshot(uidMap);
+ EXPECT_THAT(packageInfoSnapshot.package_info(),
+ UnorderedPointwise(EqPackageInfo(), expectedPackageInfos));
+
+ service->informOnePackageRemoved(kApp2, 1000);
+ EXPECT_FALSE(uidMap->hasApp(1000, kApp1));
+ EXPECT_FALSE(uidMap->hasApp(1000, kApp2));
+ EXPECT_TRUE(uidMap->hasApp(1500, kApp3));
+ EXPECT_FALSE(uidMap->hasApp(1000, "not.app"));
+ name_set = uidMap->getAppNamesFromUid(1000, true /* returnNormalized */);
+ EXPECT_THAT(name_set, IsEmpty());
+
+ deleted[1] = true;
+ expectedPackageInfos = buildPackageInfos(kApps, kUids, kVersions, kVersionStrings, kInstallers,
+ kCertificateHashes, deleted, /* installerIndices */ {},
+ /* hashStrings */ false);
+ packageInfoSnapshot = getPackageInfoSnapshot(uidMap);
+ EXPECT_THAT(packageInfoSnapshot.package_info(),
+ UnorderedPointwise(EqPackageInfo(), expectedPackageInfos));
+
+ service->informOnePackageRemoved(kApp3, 1500);
+ EXPECT_FALSE(uidMap->hasApp(1000, kApp1));
+ EXPECT_FALSE(uidMap->hasApp(1000, kApp2));
+ EXPECT_FALSE(uidMap->hasApp(1500, kApp3));
+ EXPECT_FALSE(uidMap->hasApp(1000, "not.app"));
+ name_set = uidMap->getAppNamesFromUid(1500, true /* returnNormalized */);
+ EXPECT_THAT(name_set, IsEmpty());
+
+ deleted[2] = true;
+ expectedPackageInfos = buildPackageInfos(kApps, kUids, kVersions, kVersionStrings, kInstallers,
+ kCertificateHashes, deleted, /* installerIndices */ {},
+ /* hashStrings */ false);
+ packageInfoSnapshot = getPackageInfoSnapshot(uidMap);
+ EXPECT_THAT(packageInfoSnapshot.package_info(),
+ UnorderedPointwise(EqPackageInfo(), expectedPackageInfos));
}
TEST(UidMapTest, TestUpdateApp) {
- UidMap m;
- m.updateMap(1, {1000, 1000}, {4, 5}, {String16("v4"), String16("v5")},
- {String16(kApp1.c_str()), String16(kApp2.c_str())}, {String16(""), String16("")},
- /* certificateHash */ {{}, {}});
- std::set<string> name_set = m.getAppNamesFromUid(1000, true /* returnNormalized */);
- ASSERT_EQ(name_set.size(), 2u);
- EXPECT_TRUE(name_set.find(kApp1) != name_set.end());
- EXPECT_TRUE(name_set.find(kApp2) != name_set.end());
+ const sp<UidMap> uidMap = new UidMap();
+ const shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(
+ uidMap, /* queue */ nullptr, /* LogEventFilter */ nullptr);
+ sendPackagesToStatsd(service, kUids, kVersions, kVersionStrings, kApps, kInstallers,
+ kCertificateHashes);
- // Adds a new name for uid 1000.
- m.updateApp(2, String16("NeW_aPP1_NAmE"), 1000, 40, String16("v40"), String16(""),
- /* certificateHash */ {});
- name_set = m.getAppNamesFromUid(1000, true /* returnNormalized */);
- ASSERT_EQ(name_set.size(), 3u);
- EXPECT_TRUE(name_set.find(kApp1) != name_set.end());
- EXPECT_TRUE(name_set.find(kApp2) != name_set.end());
- EXPECT_TRUE(name_set.find("NeW_aPP1_NAmE") == name_set.end());
- EXPECT_TRUE(name_set.find("new_app1_name") != name_set.end());
+ // Update app1 version.
+ service->informOnePackage(kApps[0].c_str(), kUids[0], /* version */ 40,
+ /* versionString */ "v40", kInstallers[0], kCertificateHashes[0]);
+ EXPECT_THAT(uidMap->getAppVersion(kUids[0], kApps[0]), Eq(40));
+ std::set<string> name_set = uidMap->getAppNamesFromUid(1000, true /* returnNormalized */);
+ EXPECT_THAT(name_set, UnorderedElementsAre(kApp1, kApp2));
- // This name is also reused by another uid 2000.
- m.updateApp(3, String16("NeW_aPP1_NAmE"), 2000, 1, String16("v1"), String16(""),
- /* certificateHash */ {});
- name_set = m.getAppNamesFromUid(2000, true /* returnNormalized */);
- ASSERT_EQ(name_set.size(), 1u);
- EXPECT_TRUE(name_set.find("NeW_aPP1_NAmE") == name_set.end());
- EXPECT_TRUE(name_set.find("new_app1_name") != name_set.end());
-}
+ // Add a new name for uid 1000.
+ service->informOnePackage("NeW_aPP1_NAmE", 1000, /* version */ 40,
+ /* versionString */ "v40", /* installer */ "com.android.vending",
+ /* certificateHash */ {'a'});
+ name_set = uidMap->getAppNamesFromUid(1000, true /* returnNormalized */);
+ EXPECT_THAT(name_set, UnorderedElementsAre(kApp1, kApp2, "new_app1_name"));
-static void protoOutputStreamToUidMapping(ProtoOutputStream* proto, UidMapping* results) {
- vector<uint8_t> bytes;
- bytes.resize(proto->size());
- size_t pos = 0;
- sp<ProtoReader> reader = proto->data();
- while (reader->readBuffer() != NULL) {
- size_t toRead = reader->currentToRead();
- std::memcpy(&((bytes)[pos]), reader->readBuffer(), toRead);
- pos += toRead;
- reader->move(toRead);
- }
- results->ParseFromArray(bytes.data(), bytes.size());
+ // Re-add the same name for another uid 2000
+ service->informOnePackage("NeW_aPP1_NAmE", 2000, /* version */ 1,
+ /* versionString */ "v1", /* installer */ "",
+ /* certificateHash */ {'b'});
+ name_set = uidMap->getAppNamesFromUid(2000, true /* returnNormalized */);
+ EXPECT_THAT(name_set, UnorderedElementsAre("new_app1_name"));
+
+ // Re-add existing package with different installer
+ service->informOnePackage("NeW_aPP1_NAmE", 2000, /* version */ 1,
+ /* versionString */ "v1", /* installer */ "new_installer",
+ /* certificateHash */ {'b'});
+ name_set = uidMap->getAppNamesFromUid(2000, true /* returnNormalized */);
+ EXPECT_THAT(name_set, UnorderedElementsAre("new_app1_name"));
+
+ vector<int32_t> uids = concatenate(kUids, {1000, 2000});
+ vector<int64_t> versions = concatenate(kVersions, {40, 1});
+ versions[0] = 40;
+ vector<string> versionStrings = concatenate(kVersionStrings, {"v40", "v1"});
+ versionStrings[0] = "v40";
+ vector<string> apps = concatenate(kApps, {"NeW_aPP1_NAmE", "NeW_aPP1_NAmE"});
+ vector<string> installers = concatenate(kInstallers, {"com.android.vending", "new_installer"});
+ vector<bool> deleted = concatenate(kDeleted, {false, false});
+ vector<vector<uint8_t>> certHashes = concatenate(kCertificateHashes, {{'a'}, {'b'}});
+ vector<PackageInfo> expectedPackageInfos =
+ buildPackageInfos(apps, uids, versions, versionStrings, installers, certHashes, deleted,
+ /* installerIndices */ {},
+ /* hashStrings */ false);
+
+ PackageInfoSnapshot packageInfoSnapshot = getPackageInfoSnapshot(uidMap);
+ EXPECT_THAT(packageInfoSnapshot.package_info(),
+ UnorderedPointwise(EqPackageInfo(), expectedPackageInfos));
}
// Test that uid map returns at least one snapshot even if we already obtained
@@ -199,7 +337,7 @@
// Check there's still a uidmap attached this one.
UidMapping results;
- protoOutputStreamToUidMapping(&proto, &results);
+ outputStreamToProto(&proto, &results);
ASSERT_EQ(1, results.snapshots_size());
EXPECT_EQ("v1", results.snapshots(0).package_info(0).version_string());
}
@@ -227,7 +365,7 @@
// Snapshot should still contain this item as deleted.
UidMapping results;
- protoOutputStreamToUidMapping(&proto, &results);
+ outputStreamToProto(&proto, &results);
ASSERT_EQ(1, results.snapshots(0).package_info_size());
EXPECT_EQ(true, results.snapshots(0).package_info(0).deleted());
}
@@ -261,7 +399,7 @@
m.appendUidMap(/* timestamp */ 3, config1, /* includeVersionStrings */ true,
/* includeInstaller */ true, /* truncatedCertificateHashSize */ 0,
/* str_set */ nullptr, &proto);
- protoOutputStreamToUidMapping(&proto, &results);
+ outputStreamToProto(&proto, &results);
ASSERT_EQ(maxDeletedApps + 10, results.snapshots(0).package_info_size());
// Now remove all the apps.
@@ -276,7 +414,7 @@
/* includeInstaller */ true, /* truncatedCertificateHashSize */ 0,
/* str_set */ nullptr, &proto);
// Snapshot drops the first nine items.
- protoOutputStreamToUidMapping(&proto, &results);
+ outputStreamToProto(&proto, &results);
ASSERT_EQ(maxDeletedApps, results.snapshots(0).package_info_size());
}
@@ -302,7 +440,7 @@
/* includeInstaller */ true, /* truncatedCertificateHashSize */ 0,
/* str_set */ nullptr, &proto);
UidMapping results;
- protoOutputStreamToUidMapping(&proto, &results);
+ outputStreamToProto(&proto, &results);
ASSERT_EQ(1, results.snapshots_size());
// We have to keep at least one snapshot in memory at all times.
@@ -310,7 +448,7 @@
m.appendUidMap(/* timestamp */ 2, config1, /* includeVersionStrings */ true,
/* includeInstaller */ true, /* truncatedCertificateHashSize */ 0,
/* str_set */ nullptr, &proto);
- protoOutputStreamToUidMapping(&proto, &results);
+ outputStreamToProto(&proto, &results);
ASSERT_EQ(1, results.snapshots_size());
// Now add another configuration.
@@ -322,7 +460,7 @@
m.appendUidMap(/* timestamp */ 6, config1, /* includeVersionStrings */ true,
/* includeInstaller */ true, /* truncatedCertificateHashSize */ 0,
/* str_set */ nullptr, &proto);
- protoOutputStreamToUidMapping(&proto, &results);
+ outputStreamToProto(&proto, &results);
ASSERT_EQ(1, results.snapshots_size());
ASSERT_EQ(1, results.changes_size());
ASSERT_EQ(1U, m.mChanges.size());
@@ -337,7 +475,7 @@
m.appendUidMap(/* timestamp */ 8, config1, /* includeVersionStrings */ true,
/* includeInstaller */ true, /* truncatedCertificateHashSize */ 0,
/* str_set */ nullptr, &proto);
- protoOutputStreamToUidMapping(&proto, &results);
+ outputStreamToProto(&proto, &results);
ASSERT_EQ(1, results.snapshots_size());
ASSERT_EQ(1, results.changes_size());
ASSERT_EQ(2U, m.mChanges.size());
@@ -346,7 +484,7 @@
m.appendUidMap(/* timestamp */ 9, config2, /* includeVersionStrings */ true,
/* includeInstaller */ true, /* truncatedCertificateHashSize */ 0,
/* str_set */ nullptr, &proto);
- protoOutputStreamToUidMapping(&proto, &results);
+ outputStreamToProto(&proto, &results);
ASSERT_EQ(1, results.snapshots_size());
ASSERT_EQ(2, results.changes_size());
// At this point both should be cleared.
@@ -373,7 +511,6 @@
/* certificateHash */ {});
ProtoOutputStream proto;
- vector<uint8_t> bytes;
m.appendUidMap(/* timestamp */ 2, config1, /* includeVersionStrings */ true,
/* includeInstaller */ true, /* truncatedCertificateHashSize */ 0,
/* str_set */ nullptr, &proto);
@@ -422,6 +559,179 @@
ASSERT_EQ(1U, m.mChanges.size());
}
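+// Fixture that seeds the UidMap via StatsService with the baseline package data and
+// precomputes the installer hashes used by the assertions below.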
+class UidMapTestAppendUidMap : public Test {
+protected:
+ const ConfigKey config1;
+ const sp<UidMap> uidMap;
+ const shared_ptr<StatsService> service;
+
+ set<string> installersSet;
+ set<uint64_t> installerHashSet;
+ vector<uint64_t> installerHashes;
+
+ UidMapTestAppendUidMap()
+ : config1(1, StringToId("config1")),
+ uidMap(new UidMap()),
+ service(SharedRefBase::make<StatsService>(uidMap, /* queue */ nullptr,
+ /* LogEventFilter */ nullptr)) {
+ }
+
+ void SetUp() override {
+ sendPackagesToStatsd(service, kUids, kVersions, kVersionStrings, kApps, kInstallers,
+ kCertificateHashes);
+
+ for (const string& installer : kInstallers) {
+ installersSet.insert(installer);
+ uint64_t installerHash = Hash64(installer);
+ installerHashes.emplace_back(installerHash);
+ installerHashSet.insert(installerHash);
+ }
+ }
+};
+
+TEST_F(UidMapTestAppendUidMap, TestInstallersInReportIncludeInstallerAndHashStrings) {
+ ProtoOutputStream proto;
+ set<string> strSet;
+ uidMap->appendUidMap(/* timestamp */ 3, config1, /* includeVersionStrings */ true,
+ /* includeInstaller */ true, /* truncatedCertificateHashSize */ 0, &strSet,
+ &proto);
+
+ UidMapping results;
+ outputStreamToProto(&proto, &results);
+
+ // Verify hashes for all installers are in the installer_hash list.
+ EXPECT_THAT(results.installer_hash(), UnorderedElementsAreArray(installerHashSet));
+
+ EXPECT_THAT(results.installer_name(), IsEmpty());
+
+ // Verify all installer names are added to the strSet argument.
+ EXPECT_THAT(strSet, IsSupersetOf(installersSet));
+
+ ASSERT_THAT(results.snapshots_size(), Eq(1));
+
+ // Compute installer indices for each package.
+ // Find the location of each installerHash from the input in the results.
+ // installerIndices[i] is the index in results.installer_hash() that matches installerHashes[i].
+ vector<uint32_t> installerIndices = computeIndices(
+ results.installer_hash().begin(), results.installer_hash().end(), installerHashes);
+
+ vector<PackageInfo> expectedPackageInfos =
+ buildPackageInfos(kApps, kUids, kVersions, kVersionStrings, kInstallers,
+ /* certHashes */ {}, kDeleted, installerIndices,
+ /* hashStrings */ true);
+
+ EXPECT_THAT(strSet, IsSupersetOf(kApps));
+
+ EXPECT_THAT(results.snapshots(0).package_info(),
+ UnorderedPointwise(EqPackageInfo(), expectedPackageInfos));
+}
+
+TEST_F(UidMapTestAppendUidMap, TestInstallersInReportIncludeInstallerAndDontHashStrings) {
+ ProtoOutputStream proto;
+ uidMap->appendUidMap(/* timestamp */ 3, config1, /* includeVersionStrings */ true,
+ /* includeInstaller */ true, /* truncatedCertificateHashSize */ 0,
+ /* str_set */ nullptr, &proto);
+
+ UidMapping results;
+ outputStreamToProto(&proto, &results);
+
+ // Verify all installers are in the installer_name list.
+ EXPECT_THAT(results.installer_name(), UnorderedElementsAreArray(installersSet));
+
+ EXPECT_THAT(results.installer_hash(), IsEmpty());
+
+ ASSERT_THAT(results.snapshots_size(), Eq(1));
+
+ vector<uint32_t> installerIndices = computeIndices(results.installer_name().begin(),
+ results.installer_name().end(), kInstallers);
+
+ vector<PackageInfo> expectedPackageInfos =
+ buildPackageInfos(kApps, kUids, kVersions, kVersionStrings, kInstallers,
+ /* certHashes */ {}, kDeleted, installerIndices,
+ /* hashStrings */ false);
+
+ EXPECT_THAT(results.snapshots(0).package_info(),
+ UnorderedPointwise(EqPackageInfo(), expectedPackageInfos));
+}
+
+// Parameterized test with a set<string>* parameter that controls whether strings are hashed
+// in the report. A nullptr value means strings are not hashed; a non-null value means strings
+// are hashed in the report and the original strings are added to this set.
+class UidMapTestAppendUidMapHashStrings : public UidMapTestAppendUidMap,
+ public WithParamInterface<set<string>*> {
+public:
+ inline static set<string> strSet;
+
+protected:
+ void SetUp() override {
+ strSet.clear();
+ }
+};
+
+INSTANTIATE_TEST_SUITE_P(
+ HashStrings, UidMapTestAppendUidMapHashStrings,
+ Values(nullptr, &(UidMapTestAppendUidMapHashStrings::strSet)),
+ [](const TestParamInfo<UidMapTestAppendUidMapHashStrings::ParamType>& info) {
+ return info.param == nullptr ? "NoHashStrings" : "HashStrings";
+ });
+
+TEST_P(UidMapTestAppendUidMapHashStrings, TestNoIncludeInstallersInReport) {
+ ProtoOutputStream proto;
+ uidMap->appendUidMap(/* timestamp */ 3, config1, /* includeVersionStrings */ true,
+ /* includeInstaller */ false, /* truncatedCertificateHashSize */ 0,
+ /* str_set */ GetParam(), &proto);
+
+ UidMapping results;
+ outputStreamToProto(&proto, &results);
+
+ // Verify installer lists are empty.
+ EXPECT_THAT(results.installer_name(), IsEmpty());
+ EXPECT_THAT(results.installer_hash(), IsEmpty());
+
+ ASSERT_THAT(results.snapshots_size(), Eq(1));
+
+    // Verify that none of the installer, installer_hash, and installer_index fields in
+    // PackageInfo are populated.
+ EXPECT_THAT(results.snapshots(0).package_info(),
+ Each(Property(&PackageInfo::has_installer, IsFalse())));
+ EXPECT_THAT(results.snapshots(0).package_info(),
+ Each(Property(&PackageInfo::has_installer_hash, IsFalse())));
+ EXPECT_THAT(results.snapshots(0).package_info(),
+ Each(Property(&PackageInfo::has_installer_index, IsFalse())));
+}
+
+// Parameterized test covering different truncation sizes for the certificate hashes.
+class UidMapTestTruncateCertificateHash : public UidMapTestAppendUidMap,
+ public WithParamInterface<uint8_t> {};
+
+INSTANTIATE_TEST_SUITE_P(ZeroOneTwoThree, UidMapTestTruncateCertificateHash,
+ Range(uint8_t{0}, uint8_t{4}));
+
+TEST_P(UidMapTestTruncateCertificateHash, TestCertificateHashesTruncated) {
+ const uint8_t hashSize = GetParam();
+ ProtoOutputStream proto;
+ uidMap->appendUidMap(/* timestamp */ 3, config1, /* includeVersionStrings */ true,
+ /* includeInstaller */ false, hashSize, /* str_set */ nullptr, &proto);
+
+ UidMapping results;
+ outputStreamToProto(&proto, &results);
+
+ ASSERT_THAT(results.snapshots_size(), Eq(1));
+
+ vector<vector<uint8_t>> certHashes = kCertificateHashes;
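+    // Each expected certificate hash is truncated to at most hashSize bytes; shorter hashes are
+    // left unchanged.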
+ for (vector<uint8_t>& certHash : certHashes) {
+ certHash.resize(certHash.size() < hashSize ? certHash.size() : hashSize);
+ }
+ vector<PackageInfo> expectedPackageInfos =
+ buildPackageInfos(kApps, kUids, kVersions, kVersionStrings,
+ /* installers */ {}, certHashes, kDeleted,
+                              /* installerIndices */ {},
+ /* hashStrings */ false);
+
+ EXPECT_THAT(results.snapshots(0).package_info(),
+ UnorderedPointwise(EqPackageInfo(), expectedPackageInfos));
+}
+
#else
GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif
diff --git a/statsd/tests/e2e/Anomaly_count_e2e_test.cpp b/statsd/tests/e2e/Anomaly_count_e2e_test.cpp
index ec74df6..eccaafe 100644
--- a/statsd/tests/e2e/Anomaly_count_e2e_test.cpp
+++ b/statsd/tests/e2e/Anomaly_count_e2e_test.cpp
@@ -54,7 +54,7 @@
} // namespace
-TEST(AnomalyDetectionE2eTest, TestSlicedCountMetric_single_bucket) {
+TEST(AnomalyCountDetectionE2eTest, TestSlicedCountMetric_single_bucket) {
const int num_buckets = 1;
const int threshold = 3;
const int refractory_period_sec = 10;
@@ -171,7 +171,7 @@
anomalyTracker->getRefractoryPeriodEndsSec(dimensionKey2));
}
-TEST(AnomalyDetectionE2eTest, TestSlicedCountMetric_multiple_buckets) {
+TEST(AnomalyCountDetectionE2eTest, TestSlicedCountMetric_multiple_buckets) {
const int num_buckets = 3;
const int threshold = 3;
const int refractory_period_sec = 10;
@@ -241,7 +241,7 @@
anomalyTracker->getRefractoryPeriodEndsSec(dimensionKey1));
}
-TEST(AnomalyDetectionE2eTest, TestCountMetric_save_refractory_to_disk_no_data_written) {
+TEST(AnomalyCountDetectionE2eTest, TestCountMetric_save_refractory_to_disk_no_data_written) {
const int num_buckets = 1;
const int threshold = 0;
const int refractory_period_sec = 86400 * 365; // 1 year
@@ -266,7 +266,7 @@
ASSERT_EQ(result.stats_metadata_size(), 0);
}
-TEST(AnomalyDetectionE2eTest, TestCountMetric_save_refractory_to_disk) {
+TEST(AnomalyCountDetectionE2eTest, TestCountMetric_save_refractory_to_disk) {
const int num_buckets = 1;
const int threshold = 0;
const int refractory_period_sec = 86400 * 365; // 1 year
@@ -329,7 +329,7 @@
EXPECT_EQ(dimKeyInWhat.value_int(), fieldValue1.mValue.int_value);
}
-TEST(AnomalyDetectionE2eTest, TestCountMetric_load_refractory_from_disk) {
+TEST(AnomalyCountDetectionE2eTest, TestCountMetric_load_refractory_from_disk) {
const int num_buckets = 1;
const int threshold = 0;
const int refractory_period_sec = 86400 * 365; // 1 year
diff --git a/statsd/tests/e2e/Anomaly_duration_sum_e2e_test.cpp b/statsd/tests/e2e/Anomaly_duration_sum_e2e_test.cpp
index fd6ebde..04b473c 100644
--- a/statsd/tests/e2e/Anomaly_duration_sum_e2e_test.cpp
+++ b/statsd/tests/e2e/Anomaly_duration_sum_e2e_test.cpp
@@ -21,6 +21,7 @@
#include "src/StatsLogProcessor.h"
#include "src/StatsService.h"
#include "src/anomaly/DurationAnomalyTracker.h"
+#include "src/packages/UidMap.h"
#include "src/stats_log_util.h"
#include "tests/statsd_test_util.h"
@@ -34,9 +35,6 @@
namespace {
-const int kConfigKey = 789130124;
-const int kCallingUid = 0;
-
StatsdConfig CreateStatsdConfig(int num_buckets,
uint64_t threshold_ns,
DurationMetric::AggregationType aggregationType,
@@ -97,24 +95,19 @@
(int32_t)0x02010101), Value((int32_t)222))}),
DEFAULT_DIMENSION_KEY);
-void sendConfig(shared_ptr<StatsService>& service, const StatsdConfig& config) {
- string str;
- config.SerializeToString(&str);
- std::vector<uint8_t> configAsVec(str.begin(), str.end());
- service->addConfiguration(kConfigKey, configAsVec, kCallingUid);
-}
-
} // namespace
-TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_single_bucket) {
+// Setup for test fixture.
+class AnomalyDurationDetectionE2eTest : public StatsServiceConfigTest {};
+
+TEST_F(AnomalyDurationDetectionE2eTest, TestDurationMetric_SUM_single_bucket) {
const int num_buckets = 1;
const uint64_t threshold_ns = NS_PER_SEC;
auto config = CreateStatsdConfig(num_buckets, threshold_ns, DurationMetric::SUM, true);
const uint64_t alert_id = config.alert(0).id();
const uint32_t refractory_period_sec = config.alert(0).refractory_period_secs();
- shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
- sendConfig(service, config);
+ sendConfig(config);
auto processor = service->mProcessor;
ASSERT_EQ(processor->mMetricsManagers.size(), 1u);
@@ -289,15 +282,14 @@
EXPECT_EQ(0u, anomalyTracker->getAlarmTimestampSec(dimensionKey1));
}
-TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_multiple_buckets) {
+TEST_F(AnomalyDurationDetectionE2eTest, TestDurationMetric_SUM_multiple_buckets) {
const int num_buckets = 3;
const uint64_t threshold_ns = NS_PER_SEC;
auto config = CreateStatsdConfig(num_buckets, threshold_ns, DurationMetric::SUM, true);
const uint64_t alert_id = config.alert(0).id();
const uint32_t refractory_period_sec = config.alert(0).refractory_period_secs();
- shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
- sendConfig(service, config);
+ sendConfig(config);
auto processor = service->mProcessor;
ASSERT_EQ(processor->mMetricsManagers.size(), 1u);
@@ -412,15 +404,14 @@
anomalyTracker->getRefractoryPeriodEndsSec(dimensionKey1));
}
-TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_partial_bucket) {
+TEST_F(AnomalyDurationDetectionE2eTest, TestDurationMetric_SUM_partial_bucket) {
const int num_buckets = 1;
const uint64_t threshold_ns = NS_PER_SEC;
auto config = CreateStatsdConfig(num_buckets, threshold_ns, DurationMetric::SUM, true);
const uint64_t alert_id = config.alert(0).id();
const uint32_t refractory_period_sec = config.alert(0).refractory_period_secs();
- shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
- sendConfig(service, config);
+ sendConfig(config);
auto processor = service->mProcessor;
ASSERT_EQ(processor->mMetricsManagers.size(), 1u);
@@ -501,7 +492,7 @@
anomalyTracker->getRefractoryPeriodEndsSec(dimensionKey1));
}
-TEST(AnomalyDetectionE2eTest, TestDurationMetric_SUM_long_refractory_period) {
+TEST_F(AnomalyDurationDetectionE2eTest, TestDurationMetric_SUM_long_refractory_period) {
const int num_buckets = 2;
const uint64_t threshold_ns = 3 * NS_PER_SEC;
auto config = CreateStatsdConfig(num_buckets, threshold_ns, DurationMetric::SUM, false);
@@ -510,8 +501,7 @@
const uint32_t refractory_period_sec = 3 * bucketSizeNs / NS_PER_SEC;
config.mutable_alert(0)->set_refractory_period_secs(refractory_period_sec);
- shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
- sendConfig(service, config);
+ sendConfig(config);
auto processor = service->mProcessor;
ASSERT_EQ(processor->mMetricsManagers.size(), 1u);
diff --git a/statsd/tests/e2e/ConfigUpdate_e2e_ab_test.cpp b/statsd/tests/e2e/ConfigUpdate_e2e_ab_test.cpp
index 92955bb..e078f47 100644
--- a/statsd/tests/e2e/ConfigUpdate_e2e_ab_test.cpp
+++ b/statsd/tests/e2e/ConfigUpdate_e2e_ab_test.cpp
@@ -98,7 +98,8 @@
ASSERT_EQ(uidMapping.snapshots_size(), 1);
ASSERT_EQ(uidMapping.snapshots(0).package_info_size(), 1);
EXPECT_FALSE(uidMapping.snapshots(0).package_info(0).has_version_string());
- EXPECT_EQ(uidMapping.snapshots(0).package_info(0).installer(), "installer1");
+ EXPECT_EQ(uidMapping.snapshots(0).package_info(0).installer_index(), 0);
+ EXPECT_THAT(uidMapping.installer_name(), ElementsAre("installer1"));
}
TEST_P(ConfigUpdateE2eAbTest, TestHashStrings) {
diff --git a/statsd/tests/e2e/ConfigUpdate_e2e_test.cpp b/statsd/tests/e2e/ConfigUpdate_e2e_test.cpp
index d83a39d..ead1c46 100644
--- a/statsd/tests/e2e/ConfigUpdate_e2e_test.cpp
+++ b/statsd/tests/e2e/ConfigUpdate_e2e_test.cpp
@@ -47,12 +47,44 @@
EXPECT_EQ(value.value_tuple().dimensions_value(0).value_str(), name);
}
+sp<StatsLogProcessor> CreateStatsLogProcessor(
+ const int64_t timeBaseNs, const int64_t currentTimeNs, const StatsdConfig& config,
+ const ConfigKey& key, const shared_ptr<MockLogEventFilter>& logEventFilter) {
+ if (logEventFilter) {
+ // call from StatsLogProcessor constructor
+ Expectation initCall = EXPECT_CALL(*logEventFilter,
+ setAtomIds(StatsLogProcessor::getDefaultAtomIdSet(), _))
+ .Times(1);
+ EXPECT_CALL(*logEventFilter, setAtomIds(CreateAtomIdSetFromConfig(config), _))
+ .Times(1)
+ .After(initCall);
+ }
+
+ return CreateStatsLogProcessor(timeBaseNs, currentTimeNs, config, key, nullptr, 0, new UidMap(),
+ logEventFilter);
+}
+
} // Anonymous namespace.
// Setup for test fixture.
-class ConfigUpdateE2eTest : public ::testing::Test {};
+class ConfigUpdateE2eTest : public testing::TestWithParam<bool> {
+protected:
+ std::shared_ptr<MockLogEventFilter> mLogEventFilter;
-TEST_F(ConfigUpdateE2eTest, TestEventMetric) {
+ void SetUp() override {
+ mLogEventFilter = GetParam() ? std::make_shared<MockLogEventFilter>() : nullptr;
+ }
+
+public:
+ static std::string ToString(testing::TestParamInfo<bool> info) {
+ return info.param ? "WithLogEventFilter" : "NoLogEventFilter";
+ }
+};
+
+INSTANTIATE_TEST_SUITE_P(ConfigUpdateE2eTest, ConfigUpdateE2eTest, testing::Bool(),
+ ConfigUpdateE2eTest::ToString);
+
+TEST_P(ConfigUpdateE2eTest, TestEventMetric) {
StatsdConfig config;
config.add_allowed_log_source("AID_ROOT");
@@ -99,8 +131,8 @@
ConfigKey key(123, 987);
uint64_t bucketStartTimeNs = 10000000000; // 0:10
- sp<StatsLogProcessor> processor =
- CreateStatsLogProcessor(bucketStartTimeNs, bucketStartTimeNs, config, key);
+ sp<StatsLogProcessor> processor = CreateStatsLogProcessor(bucketStartTimeNs, bucketStartTimeNs,
+ config, key, mLogEventFilter);
int app1Uid = 123;
vector<int> attributionUids1 = {app1Uid};
@@ -161,6 +193,11 @@
*newConfig.add_event_metric() = eventPersist;
int64_t updateTimeNs = bucketStartTimeNs + 60 * NS_PER_SEC;
+ if (GetParam()) {
+ EXPECT_CALL(*mLogEventFilter,
+ setAtomIds(CreateAtomIdSetFromConfig(newConfig), processor.get()))
+ .Times(1);
+ }
processor->OnConfigUpdated(updateTimeNs, key, newConfig);
// Send events after the update.
@@ -274,7 +311,7 @@
EXPECT_EQ(data.atom().sync_state_changed().sync_name(), "sync3");
}
-TEST_F(ConfigUpdateE2eTest, TestCountMetric) {
+TEST_P(ConfigUpdateE2eTest, TestCountMetric) {
StatsdConfig config;
config.add_allowed_log_source("AID_ROOT");
@@ -336,8 +373,8 @@
ConfigKey key(123, 987);
uint64_t bucketStartTimeNs = 10000000000; // 0:10
uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(TEN_MINUTES) * 1000000LL;
- sp<StatsLogProcessor> processor =
- CreateStatsLogProcessor(bucketStartTimeNs, bucketStartTimeNs, config, key);
+ sp<StatsLogProcessor> processor = CreateStatsLogProcessor(bucketStartTimeNs, bucketStartTimeNs,
+ config, key, mLogEventFilter);
int app1Uid = 123, app2Uid = 456;
vector<int> attributionUids1 = {app1Uid};
@@ -399,6 +436,11 @@
*newConfig.add_count_metric() = countPersist;
int64_t updateTimeNs = bucketStartTimeNs + 60 * NS_PER_SEC;
+ if (GetParam()) {
+ EXPECT_CALL(*mLogEventFilter,
+ setAtomIds(CreateAtomIdSetFromConfig(newConfig), processor.get()))
+ .Times(1);
+ }
processor->OnConfigUpdated(updateTimeNs, key, newConfig);
// Send events after the update. Counts reset to 0 since this is a new bucket.
@@ -478,7 +520,7 @@
ASSERT_EQ(countChangeBefore.count_metrics().data_size(), 1);
data = countChangeBefore.count_metrics().data(0);
ASSERT_EQ(data.bucket_info_size(), 1);
- ValidateCountBucket(data.bucket_info(0), bucketStartTimeNs, updateTimeNs, 4);
+ ValidateCountBucket(data.bucket_info(0), bucketStartTimeNs, updateTimeNs, 4, 50 * NS_PER_SEC);
// Report from after update.
report = reports.reports(1);
@@ -490,7 +532,8 @@
ASSERT_EQ(countChangeAfter.count_metrics().data_size(), 1);
data = countChangeAfter.count_metrics().data(0);
ASSERT_EQ(data.bucket_info_size(), 1);
- ValidateCountBucket(data.bucket_info(0), updateTimeNs, bucketStartTimeNs + bucketSizeNs, 1);
+ ValidateCountBucket(data.bucket_info(0), updateTimeNs, bucketStartTimeNs + bucketSizeNs, 1,
+ 530 * NS_PER_SEC);
// Count wl acquires while screen on. There were 2, one in each bucket.
StatsLogReport countNewAfter = report.metrics(1);
@@ -499,8 +542,10 @@
ASSERT_EQ(countNewAfter.count_metrics().data_size(), 1);
data = countNewAfter.count_metrics().data(0);
ASSERT_EQ(data.bucket_info_size(), 2);
- ValidateCountBucket(data.bucket_info(0), updateTimeNs, bucketStartTimeNs + bucketSizeNs, 1);
- ValidateCountBucket(data.bucket_info(1), bucketStartTimeNs + bucketSizeNs, dumpTimeNs, 1);
+ ValidateCountBucket(data.bucket_info(0), updateTimeNs, bucketStartTimeNs + bucketSizeNs, 1,
+ 530 * NS_PER_SEC);
+ ValidateCountBucket(data.bucket_info(1), bucketStartTimeNs + bucketSizeNs, dumpTimeNs, 1,
+ 10 * NS_PER_SEC);
// Uid 1 had 1 sync, uid 2 had 2 syncs.
StatsLogReport countPersistAfter = report.metrics(2);
@@ -524,7 +569,7 @@
ValidateCountBucket(data.bucket_info(0), updateTimeNs, bucketStartTimeNs + bucketSizeNs, 2);
}
-TEST_F(ConfigUpdateE2eTest, TestDurationMetric) {
+TEST_P(ConfigUpdateE2eTest, TestDurationMetric) {
StatsdConfig config;
config.add_allowed_log_source("AID_ROOT");
@@ -604,8 +649,8 @@
ConfigKey key(123, 987);
uint64_t bucketStartTimeNs = 10000000000; // 0:10
uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(TEN_MINUTES) * 1000000LL;
- sp<StatsLogProcessor> processor =
- CreateStatsLogProcessor(bucketStartTimeNs, bucketStartTimeNs, config, key);
+ sp<StatsLogProcessor> processor = CreateStatsLogProcessor(bucketStartTimeNs, bucketStartTimeNs,
+ config, key, mLogEventFilter);
int app1Uid = 123, app2Uid = 456, app3Uid = 789;
vector<int> attributionUids1 = {app1Uid};
@@ -680,6 +725,11 @@
// At update, only uid 1 is syncing & holding a wakelock, duration=33. Max is paused for uid3.
// Before the update, only uid2 will report a duration for max, since others are started/paused.
int64_t updateTimeNs = bucketStartTimeNs + 60 * NS_PER_SEC;
+ if (GetParam()) {
+ EXPECT_CALL(*mLogEventFilter,
+ setAtomIds(CreateAtomIdSetFromConfig(newConfig), processor.get()))
+ .Times(1);
+ }
processor->OnConfigUpdated(updateTimeNs, key, newConfig);
// Send events after the update.
@@ -782,7 +832,8 @@
ASSERT_EQ(durationMetrics.data_size(), 1);
data = durationMetrics.data(0);
ASSERT_EQ(data.bucket_info_size(), 1);
- ValidateDurationBucket(data.bucket_info(0), bucketStartTimeNs, updateTimeNs, 30 * NS_PER_SEC);
+ ValidateDurationBucket(data.bucket_info(0), bucketStartTimeNs, updateTimeNs, 30 * NS_PER_SEC,
+ 35000000000);
// Report from after update.
report = reports.reports(1);
@@ -796,7 +847,8 @@
ASSERT_EQ(durationMetrics.data_size(), 1);
data = durationMetrics.data(0);
ASSERT_EQ(data.bucket_info_size(), 1);
- ValidateDurationBucket(data.bucket_info(0), updateTimeNs, bucketEndTimeNs, 21 * NS_PER_SEC);
+ ValidateDurationBucket(data.bucket_info(0), updateTimeNs, bucketEndTimeNs, 21 * NS_PER_SEC,
+ 21000000000);
// Duration of syncs. Always true since at least 1 uid is always syncing.
StatsLogReport durationNewAfter = report.metrics(1);
@@ -875,7 +927,7 @@
ValidateDurationBucket(data.bucket_info(0), updateTimeNs, bucketEndTimeNs, 20 * NS_PER_SEC);
}
-TEST_F(ConfigUpdateE2eTest, TestGaugeMetric) {
+TEST_P(ConfigUpdateE2eTest, TestGaugeMetric) {
StatsdConfig config;
config.add_allowed_log_source("AID_ROOT");
config.add_default_pull_packages("AID_ROOT"); // Fake puller is registered with root.
@@ -936,9 +988,19 @@
ConfigKey key(123, 987);
uint64_t bucketStartTimeNs = getElapsedRealtimeNs(); // 0:10
uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(TEN_MINUTES) * 1000000LL;
- sp<StatsLogProcessor> processor = CreateStatsLogProcessor(
- bucketStartTimeNs, bucketStartTimeNs, config, key,
- SharedRefBase::make<FakeSubsystemSleepCallback>(), util::SUBSYSTEM_SLEEP_STATE);
+ if (GetParam()) {
+ // call from StatsLogProcessor constructor
+ Expectation initCall = EXPECT_CALL(*mLogEventFilter,
+ setAtomIds(StatsLogProcessor::getDefaultAtomIdSet(), _))
+ .Times(1);
+ EXPECT_CALL(*mLogEventFilter, setAtomIds(CreateAtomIdSetFromConfig(config), _))
+ .Times(1)
+ .After(initCall);
+ }
+ sp<StatsLogProcessor> processor =
+ CreateStatsLogProcessor(bucketStartTimeNs, bucketStartTimeNs, config, key,
+ SharedRefBase::make<FakeSubsystemSleepCallback>(),
+ util::SUBSYSTEM_SLEEP_STATE, new UidMap(), mLogEventFilter);
int app1Uid = 123, app2Uid = 456;
@@ -1004,6 +1066,9 @@
int64_t updateTimeNs = bucketStartTimeNs + 60 * NS_PER_SEC;
// Update pulls gaugePullPersist and gaugeNew.
+ if (GetParam()) {
+ EXPECT_CALL(*mLogEventFilter, setAtomIds(CreateAtomIdSetFromConfig(newConfig), _)).Times(1);
+ }
processor->OnConfigUpdated(updateTimeNs, key, newConfig);
// Verify puller manager is properly set.
@@ -1270,7 +1335,7 @@
EXPECT_EQ(data.bucket_info(0).atom(0).subsystem_sleep_state().time_millis(), 902);
}
-TEST_F(ConfigUpdateE2eTest, TestValueMetric) {
+TEST_P(ConfigUpdateE2eTest, TestValueMetric) {
StatsdConfig config;
config.add_allowed_log_source("AID_ROOT");
config.add_default_pull_packages("AID_ROOT"); // Fake puller is registered with root.
@@ -1324,10 +1389,20 @@
ConfigKey key(123, 987);
uint64_t bucketStartTimeNs = getElapsedRealtimeNs();
uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(TEN_MINUTES) * 1000000LL;
+ if (GetParam()) {
+ // call from StatsLogProcessor constructor
+ Expectation initCall = EXPECT_CALL(*mLogEventFilter,
+ setAtomIds(StatsLogProcessor::getDefaultAtomIdSet(), _))
+ .Times(1);
+ EXPECT_CALL(*mLogEventFilter, setAtomIds(CreateAtomIdSetFromConfig(config), _))
+ .Times(1)
+ .After(initCall);
+ }
// Config creation triggers pull #1.
- sp<StatsLogProcessor> processor = CreateStatsLogProcessor(
- bucketStartTimeNs, bucketStartTimeNs, config, key,
- SharedRefBase::make<FakeSubsystemSleepCallback>(), util::SUBSYSTEM_SLEEP_STATE);
+ sp<StatsLogProcessor> processor =
+ CreateStatsLogProcessor(bucketStartTimeNs, bucketStartTimeNs, config, key,
+ SharedRefBase::make<FakeSubsystemSleepCallback>(),
+ util::SUBSYSTEM_SLEEP_STATE, new UidMap(), mLogEventFilter);
// Initialize log events before update.
// ValuePushPersist and ValuePullPersist will skip the bucket due to condition unknown.
@@ -1375,6 +1450,9 @@
int64_t updateTimeNs = bucketStartTimeNs + 30 * NS_PER_SEC;
// Update pulls valuePullPersist and valueNew. Pull #3.
+ if (GetParam()) {
+ EXPECT_CALL(*mLogEventFilter, setAtomIds(CreateAtomIdSetFromConfig(newConfig), _)).Times(1);
+ }
processor->OnConfigUpdated(updateTimeNs, key, newConfig);
// Verify puller manager is properly set.
@@ -1455,6 +1533,7 @@
EXPECT_FALSE(data.has_dimensions_in_what());
EXPECT_EQ(data.slice_by_state_size(), 0);
ASSERT_EQ(data.bucket_info_size(), 1);
+ EXPECT_EQ(3, data.bucket_info(0).values(0).sample_size());
ValidateValueBucket(data.bucket_info(0), roundedBucketStartNs, roundedUpdateTimeNs, {20}, 0, 0);
// Min screen brightness while screen on. Bucket skipped due to condition unknown.
@@ -1596,7 +1675,7 @@
EXPECT_EQ(skipBucket.drop_event(0).drop_reason(), BucketDropReason::DUMP_REPORT_REQUESTED);
}
-TEST_F(ConfigUpdateE2eTest, TestKllMetric) {
+TEST_P(ConfigUpdateE2eTest, TestKllMetric) {
StatsdConfig config;
config.add_allowed_log_source("AID_ROOT");
@@ -1631,8 +1710,8 @@
ConfigKey key(123, 987);
const uint64_t bucketStartTimeNs = 10000000000; // 0:10
uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(TEN_MINUTES) * 1000000LL;
- sp<StatsLogProcessor> processor =
- CreateStatsLogProcessor(bucketStartTimeNs, bucketStartTimeNs, config, key);
+ sp<StatsLogProcessor> processor = CreateStatsLogProcessor(bucketStartTimeNs, bucketStartTimeNs,
+ config, key, mLogEventFilter);
// Initialize log events before update.
// kllPersist and kllRemove will skip the bucket due to condition unknown.
@@ -1678,6 +1757,11 @@
*newConfig.add_kll_metric() = kllPersist;
int64_t updateTimeNs = bucketStartTimeNs + 30 * NS_PER_SEC;
+ if (GetParam()) {
+ EXPECT_CALL(*mLogEventFilter,
+ setAtomIds(CreateAtomIdSetFromConfig(newConfig), processor.get()))
+ .Times(1);
+ }
processor->OnConfigUpdated(updateTimeNs, key, newConfig);
// Send events after the update. This is a new bucket.
@@ -1809,7 +1893,7 @@
EXPECT_EQ(kllPersistAfter.kll_metrics().skipped_size(), 0);
}
-TEST_F(ConfigUpdateE2eTest, TestMetricActivation) {
+TEST_P(ConfigUpdateE2eTest, TestMetricActivation) {
ALOGE("Start ConfigUpdateE2eTest#TestMetricActivation");
StatsdConfig config;
config.add_allowed_log_source("AID_ROOT");
@@ -1887,8 +1971,8 @@
ConfigKey key(123, 987);
uint64_t bucketStartTimeNs = 10000000000; // 0:10
uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(TEN_MINUTES) * 1000000LL;
- sp<StatsLogProcessor> processor =
- CreateStatsLogProcessor(bucketStartTimeNs, bucketStartTimeNs, config, key);
+ sp<StatsLogProcessor> processor = CreateStatsLogProcessor(bucketStartTimeNs, bucketStartTimeNs,
+ config, key, mLogEventFilter);
int uid1 = 55555;
// Initialize log events before update.
@@ -1937,6 +2021,11 @@
*newConfig.add_metric_activation() = immediateMetricActivation;
int64_t updateTimeNs = bucketStartTimeNs + 40 * NS_PER_SEC;
+ if (GetParam()) {
+ EXPECT_CALL(*mLogEventFilter,
+ setAtomIds(CreateAtomIdSetFromConfig(newConfig), processor.get()))
+ .Times(1);
+ }
processor->OnConfigUpdated(updateTimeNs, key, newConfig);
// The reboot will write to disk again, so sleep for 1 second to avoid this.
@@ -1955,7 +2044,7 @@
// On boot, use StartUp. However, skip config manager for simplicity.
int64_t bootTimeNs = bucketStartTimeNs + 55 * NS_PER_SEC;
- processor = CreateStatsLogProcessor(bootTimeNs, bootTimeNs, newConfig, key);
+ processor = CreateStatsLogProcessor(bootTimeNs, bootTimeNs, newConfig, key, mLogEventFilter);
processor->LoadActiveConfigsFromDisk();
processor->LoadMetadataFromDisk(getWallClockNs(), bootTimeNs);
@@ -2060,7 +2149,7 @@
ALOGE("End ConfigUpdateE2eTest#TestMetricActivation");
}
-TEST_F(ConfigUpdateE2eTest, TestAnomalyCountMetric) {
+TEST_P(ConfigUpdateE2eTest, TestAnomalyCountMetric) {
StatsdConfig config;
config.add_allowed_log_source("AID_ROOT");
@@ -2157,8 +2246,8 @@
uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(TEN_MINUTES) * 1000000LL;
uint64_t bucketStartTimeNs = 10000000000; // 0:10
uint64_t bucket2StartTimeNs = bucketStartTimeNs + bucketSizeNs;
- sp<StatsLogProcessor> processor =
- CreateStatsLogProcessor(bucketStartTimeNs, bucketStartTimeNs, config, key);
+ sp<StatsLogProcessor> processor = CreateStatsLogProcessor(bucketStartTimeNs, bucketStartTimeNs,
+ config, key, mLogEventFilter);
StatsDimensionsValueParcel wlUid1 =
CreateAttributionUidDimensionsValueParcel(util::WAKELOCK_STATE_CHANGED, app1Uid);
@@ -2236,6 +2325,11 @@
SubscriberReporter::getInstance().setBroadcastSubscriber(key, newSubId, newBroadcast);
int64_t updateTimeNs = bucket2StartTimeNs + 15 * NS_PER_SEC;
+ if (GetParam()) {
+ EXPECT_CALL(*mLogEventFilter,
+ setAtomIds(CreateAtomIdSetFromConfig(newConfig), processor.get()))
+ .Times(1);
+ }
processor->OnConfigUpdated(updateTimeNs, key, newConfig);
// Within refractory of AlertPreserve, but AlertNew should fire since the full bucket has 2.
@@ -2271,7 +2365,7 @@
SubscriberReporter::getInstance().unsetBroadcastSubscriber(key, newSubId);
}
-TEST_F(ConfigUpdateE2eTest, TestAnomalyDurationMetric) {
+TEST_P(ConfigUpdateE2eTest, TestAnomalyDurationMetric) {
StatsdConfig config;
config.add_allowed_log_source("AID_ROOT");
@@ -2374,7 +2468,18 @@
SubscriberReporter::getInstance().setBroadcastSubscriber(key, replaceSubId, replaceBroadcast);
SubscriberReporter::getInstance().setBroadcastSubscriber(key, removeSubId, removeBroadcast);
- shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
+ const sp<UidMap> uidMap = new UidMap();
+ if (GetParam()) {
+ // call from StatsLogProcessor constructor
+ Expectation initCall = EXPECT_CALL(*mLogEventFilter,
+ setAtomIds(StatsLogProcessor::getDefaultAtomIdSet(), _))
+ .Times(1);
+ EXPECT_CALL(*mLogEventFilter, setAtomIds(CreateAtomIdSetFromConfig(config), _))
+ .Times(1)
+ .After(initCall);
+ }
+ const shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(
+ uidMap, /* queue */ nullptr, /* LogEventFilter */ mLogEventFilter);
sp<StatsLogProcessor> processor = service->mProcessor;
uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(TEN_MINUTES) * 1000000LL;
int64_t bucketStartTimeNs = processor->mTimeBaseNs;
@@ -2561,6 +2666,9 @@
SubscriberReporter::getInstance().setBroadcastSubscriber(key, newSubId, newBroadcast);
int64_t updateTimeNs = bucket2StartTimeNs + 50 * NS_PER_SEC;
+ if (GetParam()) {
+ EXPECT_CALL(*mLogEventFilter, setAtomIds(CreateAtomIdSetFromConfig(newConfig), _)).Times(1);
+ }
processor->OnConfigUpdated(updateTimeNs, key, newConfig);
// Alert preserve will set alarm after the refractory period, but alert new will set it for
@@ -2620,7 +2728,7 @@
SubscriberReporter::getInstance().unsetBroadcastSubscriber(key, newSubId);
}
-TEST_F(ConfigUpdateE2eTest, TestAlarms) {
+TEST_P(ConfigUpdateE2eTest, TestAlarms) {
StatsdConfig config;
config.add_allowed_log_source("AID_ROOT");
Alarm alarmPreserve = createAlarm("AlarmPreserve", /*offset*/ 5 * MS_PER_SEC,
@@ -2692,7 +2800,7 @@
int64_t startTimeSec = 10;
sp<StatsLogProcessor> processor = CreateStatsLogProcessor(
- startTimeSec * NS_PER_SEC, startTimeSec * NS_PER_SEC, config, key);
+ startTimeSec * NS_PER_SEC, startTimeSec * NS_PER_SEC, config, key, mLogEventFilter);
sp<AlarmMonitor> alarmMonitor = processor->getPeriodicAlarmMonitor();
// First alarm is for alarm preserve's offset of 5 seconds.
@@ -2748,7 +2856,9 @@
return Status::ok();
});
SubscriberReporter::getInstance().setBroadcastSubscriber(key, newSubId, newBroadcast);
-
+ if (GetParam()) {
+ EXPECT_CALL(*mLogEventFilter, setAtomIds(CreateAtomIdSetFromConfig(newConfig), _)).Times(1);
+ }
processor->OnConfigUpdated((startTimeSec + 90) * NS_PER_SEC, key, newConfig);
// After the update, the alarm time should remain unchanged since alarm replace now fires every
// minute with no offset.
@@ -2788,7 +2898,7 @@
SubscriberReporter::getInstance().unsetBroadcastSubscriber(key, newSubId);
}
-TEST_F(ConfigUpdateE2eTest, TestNewDurationExistingWhat) {
+TEST_P(ConfigUpdateE2eTest, TestNewDurationExistingWhat) {
StatsdConfig config;
config.add_allowed_log_source("AID_ROOT");
*config.add_atom_matcher() = CreateAcquireWakelockAtomMatcher();
@@ -2800,8 +2910,8 @@
ConfigKey key(123, 987);
uint64_t bucketStartTimeNs = 10000000000; // 0:10
uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(FIVE_MINUTES) * 1000000LL;
- sp<StatsLogProcessor> processor =
- CreateStatsLogProcessor(bucketStartTimeNs, bucketStartTimeNs, config, key);
+ sp<StatsLogProcessor> processor = CreateStatsLogProcessor(bucketStartTimeNs, bucketStartTimeNs,
+ config, key, mLogEventFilter);
int app1Uid = 123;
vector<int> attributionUids1 = {app1Uid};
@@ -2820,6 +2930,11 @@
durationMetric->set_bucket(FIVE_MINUTES);
uint64_t updateTimeNs = bucketStartTimeNs + 60 * NS_PER_SEC; // 1:00
+ if (GetParam()) {
+ EXPECT_CALL(*mLogEventFilter,
+ setAtomIds(CreateAtomIdSetFromConfig(config), processor.get()))
+ .Times(1);
+ }
processor->OnConfigUpdated(updateTimeNs, key, config);
event = CreateReleaseWakelockEvent(bucketStartTimeNs + 80 * NS_PER_SEC, attributionUids1,
@@ -2850,7 +2965,7 @@
EXPECT_EQ(bucketInfo.duration_nanos(), 20 * NS_PER_SEC);
}
-TEST_F(ConfigUpdateE2eTest, TestNewDurationExistingWhatSlicedCondition) {
+TEST_P(ConfigUpdateE2eTest, TestNewDurationExistingWhatSlicedCondition) {
StatsdConfig config;
config.add_allowed_log_source("AID_ROOT");
*config.add_atom_matcher() = CreateAcquireWakelockAtomMatcher();
@@ -2872,8 +2987,8 @@
ConfigKey key(123, 987);
uint64_t bucketStartTimeNs = 10000000000; // 0:10
uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(FIVE_MINUTES) * 1000000LL;
- sp<StatsLogProcessor> processor =
- CreateStatsLogProcessor(bucketStartTimeNs, bucketStartTimeNs, config, key);
+ sp<StatsLogProcessor> processor = CreateStatsLogProcessor(bucketStartTimeNs, bucketStartTimeNs,
+ config, key, mLogEventFilter);
int app1Uid = 123, app2Uid = 456;
vector<int> attributionUids1 = {app1Uid};
@@ -2910,6 +3025,11 @@
CreateDimensions(util::ACTIVITY_FOREGROUND_STATE_CHANGED, {1 /*uid*/});
uint64_t updateTimeNs = bucketStartTimeNs + 60 * NS_PER_SEC; // 1:00
+ if (GetParam()) {
+ EXPECT_CALL(*mLogEventFilter,
+ setAtomIds(CreateAtomIdSetFromConfig(config), processor.get()))
+ .Times(1);
+ }
processor->OnConfigUpdated(updateTimeNs, key, config);
event = CreateMoveToBackgroundEvent(bucketStartTimeNs + 73 * NS_PER_SEC, app2Uid); // 1:13
@@ -2949,7 +3069,7 @@
EXPECT_EQ(bucketInfo.duration_nanos(), 17 * NS_PER_SEC);
}
-TEST_F(ConfigUpdateE2eTest, TestNewDurationExistingWhatSlicedState) {
+TEST_P(ConfigUpdateE2eTest, TestNewDurationExistingWhatSlicedState) {
StatsdConfig config;
config.add_allowed_log_source("AID_ROOT");
*config.add_atom_matcher() = CreateAcquireWakelockAtomMatcher();
@@ -2984,8 +3104,8 @@
ConfigKey key(123, 987);
uint64_t bucketStartTimeNs = 10000000000; // 0:10
uint64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(FIVE_MINUTES) * 1000000LL;
- sp<StatsLogProcessor> processor =
- CreateStatsLogProcessor(bucketStartTimeNs, bucketStartTimeNs, config, key);
+ sp<StatsLogProcessor> processor = CreateStatsLogProcessor(bucketStartTimeNs, bucketStartTimeNs,
+ config, key, mLogEventFilter);
int app1Uid = 123, app2Uid = 456;
vector<int> attributionUids1 = {app1Uid};
@@ -3024,6 +3144,11 @@
CreateDimensions(util::UID_PROCESS_STATE_CHANGED, {1 /*uid*/});
uint64_t updateTimeNs = bucketStartTimeNs + 60 * NS_PER_SEC; // 1:00
+ if (GetParam()) {
+ EXPECT_CALL(*mLogEventFilter,
+ setAtomIds(CreateAtomIdSetFromConfig(config), processor.get()))
+ .Times(1);
+ }
processor->OnConfigUpdated(updateTimeNs, key, config);
event = CreateAcquireWakelockEvent(bucketStartTimeNs + 72 * NS_PER_SEC, attributionUids2,
diff --git a/statsd/tests/e2e/CountMetric_e2e_test.cpp b/statsd/tests/e2e/CountMetric_e2e_test.cpp
index ef183b7..10fdb8e 100644
--- a/statsd/tests/e2e/CountMetric_e2e_test.cpp
+++ b/statsd/tests/e2e/CountMetric_e2e_test.cpp
@@ -113,6 +113,164 @@
EXPECT_EQ(ConditionState::kTrue, metricProducer2->mCondition);
}
+TEST(CountMetricE2eTest, TestConditionTrueNanos) {
+ // Initialize config.
+ StatsdConfig config;
+ config.add_allowed_log_source("AID_ROOT"); // LogEvent defaults to UID of root.
+ config.add_default_pull_packages("AID_ROOT"); // Fake puller is registered with root.
+
+ AtomMatcher syncStartMatcher = CreateSyncStartAtomMatcher();
+ *config.add_atom_matcher() = syncStartMatcher;
+ *config.add_atom_matcher() = CreateScreenTurnedOnAtomMatcher();
+ *config.add_atom_matcher() = CreateScreenTurnedOffAtomMatcher();
+
+ AtomMatcher crashMatcher = CreateProcessCrashAtomMatcher();
+ *config.add_atom_matcher() = crashMatcher;
+
+ Predicate screenOnPredicate = CreateScreenIsOnPredicate();
+ *config.add_predicate() = screenOnPredicate;
+
+ CountMetric* countMetric = config.add_count_metric();
+ int64_t metricId = StringToId("CountSyncStartWhileScreenOn");
+ countMetric->set_id(metricId);
+ countMetric->set_what(syncStartMatcher.id());
+ countMetric->set_condition(screenOnPredicate.id());
+ countMetric->set_bucket(FIVE_MINUTES);
+
+ MetricActivation* metricActivation = config.add_metric_activation();
+ metricActivation->set_metric_id(metricId);
+ EventActivation* eventActivation = metricActivation->add_event_activation();
+ eventActivation->set_atom_matcher_id(crashMatcher.id());
+ eventActivation->set_ttl_seconds(360); // 6 minutes.
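+    // The metric only counts events while an activation is live: each app crash event starts
+    // (or restarts) a 6-minute activation window, per the timeline comments below.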
+
+ const uint64_t bucketStartTimeNs = 10000000000; // 0:10
+ const uint64_t bucketSizeNs =
+ TimeUnitToBucketSizeInMillis(config.count_metric(0).bucket()) * 1000000LL;
+ const uint64_t bucket2StartTimeNs = bucketStartTimeNs + bucketSizeNs;
+ const uint64_t bucket3StartTimeNs = bucket2StartTimeNs + bucketSizeNs;
+
+ int uid = 12345;
+ int64_t cfgId = 98765;
+ ConfigKey cfgKey(uid, cfgId);
+ sp<StatsLogProcessor> processor =
+ CreateStatsLogProcessor(bucketStartTimeNs, bucketStartTimeNs, config, cfgKey);
+
+ std::vector<int> attributionUids1 = {123};
+ std::vector<string> attributionTags1 = {"App1"};
+ int64_t conditionStart1Ns = bucketStartTimeNs + 50 * NS_PER_SEC;
+ int64_t activationStart1Ns = bucketStartTimeNs + 70 * NS_PER_SEC;
+ int64_t conditionEnd1Ns = bucket2StartTimeNs + 35 * NS_PER_SEC;
+ int64_t conditionStart2Ns = bucket2StartTimeNs + 90 * NS_PER_SEC;
+ int64_t activationEnd1Ns = bucket2StartTimeNs + 140 * NS_PER_SEC;
+ int64_t conditionEnd2Ns = bucket2StartTimeNs + 155 * NS_PER_SEC;
+ int64_t activationStart2Ns = bucket2StartTimeNs + 200 * NS_PER_SEC;
+ int64_t conditionStart3Ns = bucket2StartTimeNs + 240 * NS_PER_SEC;
+ int64_t conditionEnd3Ns = bucket3StartTimeNs + 40 * NS_PER_SEC;
+ int64_t activationEnd2Ns = bucket3StartTimeNs + 270 * NS_PER_SEC;
+
+ std::vector<std::unique_ptr<LogEvent>> events;
+ // Active false, condition becomes true.
+ events.push_back(CreateScreenStateChangedEvent(
+ conditionStart1Ns,
+ android::view::DisplayStateEnum::DISPLAY_STATE_ON)); // 1:00
+ // Event not counted.
+ events.push_back(CreateSyncStartEvent(bucketStartTimeNs + 60 * NS_PER_SEC, attributionUids1,
+ attributionTags1, "sync_name")); // 1:10
+ // Active becomes true, condition true.
+ events.push_back(CreateAppCrashEvent(activationStart1Ns, 111)); // 1:20
+ events.push_back(CreateSyncStartEvent(bucketStartTimeNs + 75 * NS_PER_SEC, attributionUids1,
+ attributionTags1, "sync_name")); // 1:25
+
+ // Bucket 2 starts. 5:10
+ events.push_back(CreateSyncStartEvent(bucket2StartTimeNs + 20 * NS_PER_SEC, attributionUids1,
+ attributionTags1, "sync_name")); // 5:30
+ // Active true, condition becomes false.
+ events.push_back(CreateScreenStateChangedEvent(
+ conditionEnd1Ns,
+ android::view::DisplayStateEnum::DISPLAY_STATE_OFF)); // 5:45
+ // Event not counted.
+ events.push_back(CreateSyncStartEvent(bucket2StartTimeNs + 50 * NS_PER_SEC, attributionUids1,
+ attributionTags1, "sync_name")); // 6:00
+ // Active true, condition becomes true.
+ events.push_back(CreateScreenStateChangedEvent(
+ conditionStart2Ns,
+ android::view::DisplayStateEnum::DISPLAY_STATE_ON)); // 6:40
+ events.push_back(CreateSyncStartEvent(bucket2StartTimeNs + 110 * NS_PER_SEC, attributionUids1,
+ attributionTags1, "sync_name")); // 7:00
+ // Active becomes false, condition true (activation expires).
+ // Event not counted.
+ events.push_back(CreateSyncStartEvent(activationEnd1Ns, attributionUids1, attributionTags1,
+ "sync_name")); // 7:30
+ // Active false, condition becomes false.
+ events.push_back(CreateScreenStateChangedEvent(
+ conditionEnd2Ns,
+ android::view::DisplayStateEnum::DISPLAY_STATE_OFF)); // 7:45
+ // Event not counted.
+ events.push_back(CreateSyncStartEvent(bucket2StartTimeNs + 160 * NS_PER_SEC, attributionUids1,
+ attributionTags1, "sync_name")); // 7:50
+ // Active becomes true, condition false.
+ events.push_back(CreateAppCrashEvent(activationStart2Ns, 111)); // 8:30
+ // Event not counted.
+ events.push_back(CreateSyncStartEvent(bucket2StartTimeNs + 220 * NS_PER_SEC, attributionUids1,
+ attributionTags1, "sync_name")); // 8:50
+ // Active true, condition becomes true.
+ events.push_back(CreateScreenStateChangedEvent(
+ conditionStart3Ns,
+ android::view::DisplayStateEnum::DISPLAY_STATE_ON)); // 9:10
+ events.push_back(CreateSyncStartEvent(bucket2StartTimeNs + 250 * NS_PER_SEC, attributionUids1,
+ attributionTags1, "sync_name")); // 9:20
+
+ // Bucket 3 starts. 10:10
+ events.push_back(CreateSyncStartEvent(bucket3StartTimeNs + 10 * NS_PER_SEC, attributionUids1,
+ attributionTags1, "sync_name")); // 10:20
+ // Active true, condition becomes false.
+ events.push_back(CreateScreenStateChangedEvent(
+ conditionEnd3Ns,
+ android::view::DisplayStateEnum::DISPLAY_STATE_OFF)); // 10:50
+ // Event not counted.
+ events.push_back(CreateSyncStartEvent(bucket3StartTimeNs + 70 * NS_PER_SEC, attributionUids1,
+ attributionTags1, "sync_name")); // 11:20
+ // Active becomes false, condition false (activation expires).
+ // Event not counted.
+ events.push_back(CreateSyncStartEvent(activationEnd2Ns, attributionUids1, attributionTags1,
+ "sync_name")); // 14:40
+
+ // Send log events to StatsLogProcessor.
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get());
+ }
+
+ // Check dump report.
+ vector<uint8_t> buffer;
+ ConfigMetricsReportList reports;
+ int64_t dumpReportTimeNs = bucket3StartTimeNs + 290 * NS_PER_SEC; // 15:00
+ processor->onDumpReport(cfgKey, dumpReportTimeNs, true, true, ADB_DUMP, FAST, &buffer);
+ ASSERT_GT(buffer.size(), 0);
+ EXPECT_TRUE(reports.ParseFromArray(&buffer[0], buffer.size()));
+ backfillDimensionPath(&reports);
+ backfillStringInReport(&reports);
+ backfillStartEndTimestamp(&reports);
+
+ ASSERT_EQ(1, reports.reports_size());
+ ASSERT_EQ(1, reports.reports(0).metrics_size());
+ EXPECT_TRUE(reports.reports(0).metrics(0).has_count_metrics());
+ StatsLogReport::CountMetricDataWrapper countMetrics;
+ sortMetricDataByDimensionsValue(reports.reports(0).metrics(0).count_metrics(), &countMetrics);
+ ASSERT_EQ(1, countMetrics.data_size());
+
+ CountMetricData data = countMetrics.data(0);
+ ASSERT_EQ(4, data.bucket_info_size());
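+    // The last argument of each ValidateCountBucket call below is the expected
+    // condition_true_nanos: the portion of the bucket during which the metric was both activated
+    // and the screen-on condition was true, derived from the timeline above.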
+ ValidateCountBucket(data.bucket_info(0), bucketStartTimeNs, bucket2StartTimeNs, 1,
+ bucket2StartTimeNs - activationStart1Ns);
+ ValidateCountBucket(
+ data.bucket_info(1), bucket2StartTimeNs, activationEnd1Ns, 2,
+ (conditionEnd1Ns - bucket2StartTimeNs) + (activationEnd1Ns - conditionStart2Ns));
+ ValidateCountBucket(data.bucket_info(2), activationEnd1Ns, bucket3StartTimeNs, 1,
+ bucket3StartTimeNs - conditionStart3Ns);
+ ValidateCountBucket(data.bucket_info(3), bucket3StartTimeNs, activationEnd2Ns, 1,
+ conditionEnd3Ns - bucket3StartTimeNs);
+}
+
/**
* Test a count metric that has one slice_by_state with no primary fields.
*
@@ -469,6 +627,8 @@
countMetric->set_what(appCrashMatcher.id());
countMetric->set_bucket(TimeUnit::FIVE_MINUTES);
countMetric->add_slice_by_state(state.id());
+ *countMetric->mutable_dimensions_in_what() =
+ CreateDimensions(util::APP_CRASH_OCCURRED, {1 /*uid*/});
MetricStateLink* stateLink = countMetric->add_state_link();
stateLink->set_state_atom_id(UID_PROCESS_STATE_ATOM_ID);
auto fieldsInWhat = stateLink->mutable_fields_in_what();
@@ -592,7 +752,7 @@
EXPECT_TRUE(reports.reports(0).metrics(0).has_count_metrics());
StatsLogReport::CountMetricDataWrapper countMetrics;
sortMetricDataByDimensionsValue(reports.reports(0).metrics(0).count_metrics(), &countMetrics);
- ASSERT_EQ(5, countMetrics.data_size());
+ ASSERT_EQ(6, countMetrics.data_size());
// For each CountMetricData, check StateValue info is correct and buckets
// have correct counts.
@@ -601,6 +761,7 @@
EXPECT_EQ(UID_PROCESS_STATE_ATOM_ID, data.slice_by_state(0).atom_id());
EXPECT_TRUE(data.slice_by_state(0).has_value());
EXPECT_EQ(-1 /* StateTracker::kStateUnknown */, data.slice_by_state(0).value());
+ ValidateUidDimension(data.dimensions_in_what(), util::APP_CRASH_OCCURRED, 1);
ASSERT_EQ(1, data.bucket_info_size());
EXPECT_EQ(1, data.bucket_info(0).count());
@@ -609,14 +770,16 @@
EXPECT_EQ(UID_PROCESS_STATE_ATOM_ID, data.slice_by_state(0).atom_id());
EXPECT_TRUE(data.slice_by_state(0).has_value());
EXPECT_EQ(android::app::PROCESS_STATE_TOP, data.slice_by_state(0).value());
+ ValidateUidDimension(data.dimensions_in_what(), util::APP_CRASH_OCCURRED, 1);
ASSERT_EQ(1, data.bucket_info_size());
- EXPECT_EQ(2, data.bucket_info(0).count());
+ EXPECT_EQ(1, data.bucket_info(0).count());
data = countMetrics.data(2);
ASSERT_EQ(1, data.slice_by_state_size());
EXPECT_EQ(UID_PROCESS_STATE_ATOM_ID, data.slice_by_state(0).atom_id());
EXPECT_TRUE(data.slice_by_state(0).has_value());
EXPECT_EQ(android::app::PROCESS_STATE_FOREGROUND_SERVICE, data.slice_by_state(0).value());
+ ValidateUidDimension(data.dimensions_in_what(), util::APP_CRASH_OCCURRED, 1);
ASSERT_EQ(2, data.bucket_info_size());
EXPECT_EQ(1, data.bucket_info(0).count());
EXPECT_EQ(2, data.bucket_info(1).count());
@@ -626,6 +789,7 @@
EXPECT_EQ(UID_PROCESS_STATE_ATOM_ID, data.slice_by_state(0).atom_id());
EXPECT_TRUE(data.slice_by_state(0).has_value());
EXPECT_EQ(android::app::PROCESS_STATE_IMPORTANT_FOREGROUND, data.slice_by_state(0).value());
+ ValidateUidDimension(data.dimensions_in_what(), util::APP_CRASH_OCCURRED, 1);
ASSERT_EQ(1, data.bucket_info_size());
EXPECT_EQ(2, data.bucket_info(0).count());
@@ -633,7 +797,17 @@
ASSERT_EQ(1, data.slice_by_state_size());
EXPECT_EQ(UID_PROCESS_STATE_ATOM_ID, data.slice_by_state(0).atom_id());
EXPECT_TRUE(data.slice_by_state(0).has_value());
+ EXPECT_EQ(android::app::PROCESS_STATE_TOP, data.slice_by_state(0).value());
+ ValidateUidDimension(data.dimensions_in_what(), util::APP_CRASH_OCCURRED, 2);
+ ASSERT_EQ(1, data.bucket_info_size());
+ EXPECT_EQ(1, data.bucket_info(0).count());
+
+ data = countMetrics.data(5);
+ ASSERT_EQ(1, data.slice_by_state_size());
+ EXPECT_EQ(UID_PROCESS_STATE_ATOM_ID, data.slice_by_state(0).atom_id());
+ EXPECT_TRUE(data.slice_by_state(0).has_value());
EXPECT_EQ(android::app::PROCESS_STATE_IMPORTANT_BACKGROUND, data.slice_by_state(0).value());
+ ValidateUidDimension(data.dimensions_in_what(), util::APP_CRASH_OCCURRED, 2);
ASSERT_EQ(1, data.bucket_info_size());
EXPECT_EQ(1, data.bucket_info(0).count());
}
@@ -663,6 +837,8 @@
countMetric->set_bucket(TimeUnit::FIVE_MINUTES);
countMetric->add_slice_by_state(state1.id());
countMetric->add_slice_by_state(state2.id());
+ *countMetric->mutable_dimensions_in_what() =
+ CreateDimensions(util::APP_CRASH_OCCURRED, {1 /*uid*/});
MetricStateLink* stateLink = countMetric->add_state_link();
stateLink->set_state_atom_id(UID_PROCESS_STATE_ATOM_ID);
auto fieldsInWhat = stateLink->mutable_fields_in_what();
@@ -821,7 +997,7 @@
EXPECT_TRUE(reports.reports(0).metrics(0).has_count_metrics());
StatsLogReport::CountMetricDataWrapper countMetrics;
sortMetricDataByDimensionsValue(reports.reports(0).metrics(0).count_metrics(), &countMetrics);
- ASSERT_EQ(6, countMetrics.data_size());
+ ASSERT_EQ(7, countMetrics.data_size());
// For each CountMetricData, check StateValue info is correct and buckets
// have correct counts.
@@ -833,6 +1009,7 @@
EXPECT_EQ(UID_PROCESS_STATE_ATOM_ID, data.slice_by_state(1).atom_id());
EXPECT_TRUE(data.slice_by_state(1).has_value());
EXPECT_EQ(android::app::PROCESS_STATE_IMPORTANT_FOREGROUND, data.slice_by_state(1).value());
+ ValidateUidDimension(data.dimensions_in_what(), util::APP_CRASH_OCCURRED, 1);
ASSERT_EQ(1, data.bucket_info_size());
EXPECT_EQ(1, data.bucket_info(0).count());
@@ -844,6 +1021,7 @@
EXPECT_EQ(UID_PROCESS_STATE_ATOM_ID, data.slice_by_state(1).atom_id());
EXPECT_TRUE(data.slice_by_state(1).has_value());
EXPECT_EQ(android::app::PROCESS_STATE_IMPORTANT_FOREGROUND, data.slice_by_state(1).value());
+ ValidateUidDimension(data.dimensions_in_what(), util::APP_CRASH_OCCURRED, 1);
ASSERT_EQ(1, data.bucket_info_size());
EXPECT_EQ(1, data.bucket_info(0).count());
@@ -851,10 +1029,11 @@
ASSERT_EQ(2, data.slice_by_state_size());
EXPECT_EQ(SCREEN_STATE_ATOM_ID, data.slice_by_state(0).atom_id());
EXPECT_TRUE(data.slice_by_state(0).has_group_id());
- EXPECT_EQ(screenOnId, data.slice_by_state(0).group_id());
+ EXPECT_EQ(screenOffId, data.slice_by_state(0).group_id());
EXPECT_EQ(UID_PROCESS_STATE_ATOM_ID, data.slice_by_state(1).atom_id());
EXPECT_TRUE(data.slice_by_state(1).has_value());
- EXPECT_EQ(android::app::PROCESS_STATE_IMPORTANT_BACKGROUND, data.slice_by_state(1).value());
+ EXPECT_EQ(android::app::PROCESS_STATE_TOP, data.slice_by_state(1).value());
+ ValidateUidDimension(data.dimensions_in_what(), util::APP_CRASH_OCCURRED, 1);
ASSERT_EQ(1, data.bucket_info_size());
EXPECT_EQ(1, data.bucket_info(0).count());
@@ -865,9 +1044,10 @@
EXPECT_EQ(screenOffId, data.slice_by_state(0).group_id());
EXPECT_EQ(UID_PROCESS_STATE_ATOM_ID, data.slice_by_state(1).atom_id());
EXPECT_TRUE(data.slice_by_state(1).has_value());
- EXPECT_EQ(android::app::PROCESS_STATE_TOP, data.slice_by_state(1).value());
+ EXPECT_EQ(android::app::PROCESS_STATE_FOREGROUND_SERVICE, data.slice_by_state(1).value());
+ ValidateUidDimension(data.dimensions_in_what(), util::APP_CRASH_OCCURRED, 1);
ASSERT_EQ(1, data.bucket_info_size());
- EXPECT_EQ(2, data.bucket_info(0).count());
+ EXPECT_EQ(1, data.bucket_info(0).count());
data = countMetrics.data(4);
ASSERT_EQ(2, data.slice_by_state_size());
@@ -876,21 +1056,35 @@
EXPECT_EQ(screenOffId, data.slice_by_state(0).group_id());
EXPECT_EQ(UID_PROCESS_STATE_ATOM_ID, data.slice_by_state(1).atom_id());
EXPECT_TRUE(data.slice_by_state(1).has_value());
- EXPECT_EQ(android::app::PROCESS_STATE_FOREGROUND_SERVICE, data.slice_by_state(1).value());
+ EXPECT_EQ(android::app::PROCESS_STATE_IMPORTANT_FOREGROUND, data.slice_by_state(1).value());
+ ValidateUidDimension(data.dimensions_in_what(), util::APP_CRASH_OCCURRED, 1);
+ ASSERT_EQ(2, data.bucket_info_size());
+ EXPECT_EQ(2, data.bucket_info(0).count());
+ EXPECT_EQ(1, data.bucket_info(1).count());
+
+ data = countMetrics.data(5);
+ ASSERT_EQ(2, data.slice_by_state_size());
+ EXPECT_EQ(SCREEN_STATE_ATOM_ID, data.slice_by_state(0).atom_id());
+ EXPECT_TRUE(data.slice_by_state(0).has_group_id());
+ EXPECT_EQ(screenOnId, data.slice_by_state(0).group_id());
+ EXPECT_EQ(UID_PROCESS_STATE_ATOM_ID, data.slice_by_state(1).atom_id());
+ EXPECT_TRUE(data.slice_by_state(1).has_value());
+ EXPECT_EQ(android::app::PROCESS_STATE_IMPORTANT_BACKGROUND, data.slice_by_state(1).value());
+ ValidateUidDimension(data.dimensions_in_what(), util::APP_CRASH_OCCURRED, 2);
ASSERT_EQ(1, data.bucket_info_size());
EXPECT_EQ(1, data.bucket_info(0).count());
- data = countMetrics.data(5);
+ data = countMetrics.data(6);
ASSERT_EQ(2, data.slice_by_state_size());
EXPECT_EQ(SCREEN_STATE_ATOM_ID, data.slice_by_state(0).atom_id());
EXPECT_TRUE(data.slice_by_state(0).has_group_id());
EXPECT_EQ(screenOffId, data.slice_by_state(0).group_id());
EXPECT_EQ(UID_PROCESS_STATE_ATOM_ID, data.slice_by_state(1).atom_id());
EXPECT_TRUE(data.slice_by_state(1).has_value());
- EXPECT_EQ(android::app::PROCESS_STATE_IMPORTANT_FOREGROUND, data.slice_by_state(1).value());
- ASSERT_EQ(2, data.bucket_info_size());
- EXPECT_EQ(2, data.bucket_info(0).count());
- EXPECT_EQ(1, data.bucket_info(1).count());
+ EXPECT_EQ(android::app::PROCESS_STATE_TOP, data.slice_by_state(1).value());
+ ValidateUidDimension(data.dimensions_in_what(), util::APP_CRASH_OCCURRED, 2);
+ ASSERT_EQ(1, data.bucket_info_size());
+ EXPECT_EQ(1, data.bucket_info(0).count());
}
TEST(CountMetricE2eTest, TestUploadThreshold) {
@@ -1747,9 +1941,92 @@
1);
}
+TEST(CountMetricE2eTest, TestDimensionalSampling) {
+ ShardOffsetProvider::getInstance().setShardOffset(5);
+
+ // Initialize config.
+ StatsdConfig config;
+ config.add_allowed_log_source("AID_ROOT"); // LogEvent defaults to UID of root.
+
+ AtomMatcher appCrashMatcher =
+ CreateSimpleAtomMatcher("APP_CRASH_OCCURRED", util::APP_CRASH_OCCURRED);
+ *config.add_atom_matcher() = appCrashMatcher;
+
+ CountMetric sampledCountMetric =
+ createCountMetric("CountSampledAppCrashesPerUid", appCrashMatcher.id(), nullopt, {});
+ *sampledCountMetric.mutable_dimensions_in_what() =
+ CreateDimensions(util::APP_CRASH_OCCURRED, {1 /*uid*/});
+ *sampledCountMetric.mutable_dimensional_sampling_info()->mutable_sampled_what_field() =
+ CreateDimensions(util::APP_CRASH_OCCURRED, {1 /*uid*/});
+ sampledCountMetric.mutable_dimensional_sampling_info()->set_shard_count(2);
+ *config.add_count_metric() = sampledCountMetric;
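+    // With the shard offset fixed to 5 above and a shard count of 2, only uid dimensions whose
+    // hash falls into the selected shard are kept, so roughly half of the crashing uids are
+    // expected in the report (see the per-uid expectations below).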
+
+ // Initialize StatsLogProcessor.
+ const uint64_t bucketStartTimeNs = 10000000000; // 0:10
+ const uint64_t bucketSizeNs =
+ TimeUnitToBucketSizeInMillis(config.count_metric(0).bucket()) * 1000000LL;
+ int uid = 12345;
+ int64_t cfgId = 98765;
+ ConfigKey cfgKey(uid, cfgId);
+
+ sp<StatsLogProcessor> processor = CreateStatsLogProcessor(
+ bucketStartTimeNs, bucketStartTimeNs, config, cfgKey, nullptr, 0, new UidMap());
+
+ int appUid1 = 1001; // odd hash value
+ int appUid2 = 1002; // even hash value
+ int appUid3 = 1003; // odd hash value
+ std::vector<std::unique_ptr<LogEvent>> events;
+ events.push_back(
+ CreateAppCrashOccurredEvent(bucketStartTimeNs + 20 * NS_PER_SEC, appUid1)); // 0:30
+ events.push_back(
+ CreateAppCrashOccurredEvent(bucketStartTimeNs + 40 * NS_PER_SEC, appUid2)); // 0:50
+ events.push_back(
+ CreateAppCrashOccurredEvent(bucketStartTimeNs + 60 * NS_PER_SEC, appUid3)); // 1:10
+ events.push_back(
+ CreateAppCrashOccurredEvent(bucketStartTimeNs + 80 * NS_PER_SEC, appUid1)); // 1:20
+ events.push_back(
+ CreateAppCrashOccurredEvent(bucketStartTimeNs + 90 * NS_PER_SEC, appUid2)); // 1:30
+ events.push_back(
+ CreateAppCrashOccurredEvent(bucketStartTimeNs + 100 * NS_PER_SEC, appUid3)); // 1:40
+
+ // Send log events to StatsLogProcessor.
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get());
+ }
+
+ // Check dump report.
+ vector<uint8_t> buffer;
+ ConfigMetricsReportList reports;
+ processor->onDumpReport(cfgKey, bucketStartTimeNs + bucketSizeNs + 1, false, true, ADB_DUMP,
+ FAST, &buffer);
+ ASSERT_GT(buffer.size(), 0);
+ EXPECT_TRUE(reports.ParseFromArray(&buffer[0], buffer.size()));
+ backfillDimensionPath(&reports);
+ backfillStringInReport(&reports);
+ backfillStartEndTimestamp(&reports);
+
+ ASSERT_EQ(1, reports.reports_size());
+ ASSERT_EQ(1, reports.reports(0).metrics_size());
+ EXPECT_TRUE(reports.reports(0).metrics(0).has_count_metrics());
+ StatsLogReport::CountMetricDataWrapper countMetrics;
+ sortMetricDataByDimensionsValue(reports.reports(0).metrics(0).count_metrics(), &countMetrics);
+ ASSERT_EQ(2, countMetrics.data_size());
+
+    // Only uids 1 and 3 are logged: (odd hash value + offset of 5) % (shard count of 2) == 0.
+ CountMetricData data = countMetrics.data(0);
+ ValidateUidDimension(data.dimensions_in_what(), util::APP_CRASH_OCCURRED, appUid1);
+ ValidateCountBucket(data.bucket_info(0), bucketStartTimeNs, bucketStartTimeNs + bucketSizeNs,
+ 2);
+
+ data = countMetrics.data(1);
+ ValidateUidDimension(data.dimensions_in_what(), util::APP_CRASH_OCCURRED, appUid3);
+ ValidateCountBucket(data.bucket_info(0), bucketStartTimeNs, bucketStartTimeNs + bucketSizeNs,
+ 2);
+}
+
} // namespace statsd
} // namespace os
} // namespace android
#else
GTEST_LOG_(INFO) << "This test does nothing.\n";
-#endif
+#endif
\ No newline at end of file
diff --git a/statsd/tests/e2e/DurationMetric_e2e_test.cpp b/statsd/tests/e2e/DurationMetric_e2e_test.cpp
index 3202368..4dcef51 100644
--- a/statsd/tests/e2e/DurationMetric_e2e_test.cpp
+++ b/statsd/tests/e2e/DurationMetric_e2e_test.cpp
@@ -237,6 +237,10 @@
sp<AlarmMonitor> subscriberAlarmMonitor;
vector<int64_t> activeConfigsBroadcast;
+ std::shared_ptr<MockLogEventFilter> mockLogEventFilter = std::make_shared<MockLogEventFilter>();
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(StatsLogProcessor::getDefaultAtomIdSet(), _))
+ .Times(1);
+
int broadcastCount = 0;
StatsLogProcessor processor(
m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, bucketStartTimeNs,
@@ -249,7 +253,11 @@
activeConfigsBroadcast.insert(activeConfigsBroadcast.end(), activeConfigs.begin(),
activeConfigs.end());
return true;
- });
+ },
+ [](const ConfigKey&, const string&, const vector<int64_t>&) {}, mockLogEventFilter);
+
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(CreateAtomIdSetFromConfig(config), &processor))
+ .Times(1);
processor.OnConfigUpdated(bucketStartTimeNs, cfgKey, config); // 0:00
@@ -406,28 +414,36 @@
int appUid = 123;
vector<int> attributionUids1 = {appUid};
vector<string> attributionTags1 = {"App1"};
+ int64_t conditionStartTime1Ns = bucketStartTimeNs + 22 * NS_PER_SEC;
+ int64_t conditionEndTimeNs = bucketStartTimeNs + (3 * 60 + 15) * NS_PER_SEC;
+ int64_t conditionStartTime2Ns = bucketStartTimeNs + (4 * 60 + 20) * NS_PER_SEC;
+ int64_t bucket2StartTimeNs = bucketStartTimeNs + bucketSizeNs;
- auto event = CreateAcquireWakelockEvent(bucketStartTimeNs + 10 * NS_PER_SEC, attributionUids1,
- attributionTags1,
- "wl1"); // 0:10
- processor->OnLogEvent(event.get());
+ std::vector<std::unique_ptr<LogEvent>> events;
+ events.push_back(CreateAcquireWakelockEvent(bucketStartTimeNs + 10 * NS_PER_SEC,
+ attributionUids1, attributionTags1,
+ "wl1")); // 0:10
+ events.push_back(CreateMoveToBackgroundEvent(conditionStartTime1Ns, appUid)); // 0:22
+ events.push_back(CreateMoveToForegroundEvent(conditionEndTimeNs,
+ appUid)); // 3:15
+ events.push_back(CreateReleaseWakelockEvent(bucketStartTimeNs + 4 * 60 * NS_PER_SEC,
+ attributionUids1, attributionTags1,
+ "wl1")); // 4:00
+ events.push_back(CreateMoveToBackgroundEvent(conditionStartTime2Ns, appUid)); // 4:20
- event = CreateMoveToBackgroundEvent(bucketStartTimeNs + 22 * NS_PER_SEC, appUid); // 0:22
- processor->OnLogEvent(event.get());
-
- event = CreateMoveToForegroundEvent(bucketStartTimeNs + (3 * 60 + 15) * NS_PER_SEC,
- appUid); // 3:15
- processor->OnLogEvent(event.get());
-
- event = CreateReleaseWakelockEvent(bucketStartTimeNs + 4 * 60 * NS_PER_SEC, attributionUids1,
- attributionTags1,
- "wl1"); // 4:00
- processor->OnLogEvent(event.get());
+ // Bucket 2.
+ events.push_back(CreateAcquireWakelockEvent(bucket2StartTimeNs + 10 * NS_PER_SEC,
+ attributionUids1, attributionTags1,
+ "wl1")); // 5:10
+ // Send log events to StatsLogProcessor.
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get());
+ }
vector<uint8_t> buffer;
ConfigMetricsReportList reports;
- processor->onDumpReport(cfgKey, bucketStartTimeNs + bucketSizeNs + 1, false, true, ADB_DUMP,
- FAST, &buffer);
+ int64_t dumpReportTimeNs = bucket2StartTimeNs + 40 * NS_PER_SEC;
+ processor->onDumpReport(cfgKey, dumpReportTimeNs, true, true, ADB_DUMP, FAST, &buffer); // 5:40
ASSERT_GT(buffer.size(), 0);
EXPECT_TRUE(reports.ParseFromArray(&buffer[0], buffer.size()));
backfillDimensionPath(&reports);
@@ -441,15 +457,15 @@
&durationMetrics);
ASSERT_EQ(1, durationMetrics.data_size());
- DurationMetricData data = durationMetrics.data(0);
-
// Validate bucket info.
- ASSERT_EQ(1, data.bucket_info_size());
-
- auto bucketInfo = data.bucket_info(0);
- EXPECT_EQ(bucketStartTimeNs, bucketInfo.start_bucket_elapsed_nanos());
- EXPECT_EQ(bucketStartTimeNs + bucketSizeNs, bucketInfo.end_bucket_elapsed_nanos());
- EXPECT_EQ((2 * 60 + 53) * NS_PER_SEC, bucketInfo.duration_nanos());
+ ASSERT_EQ(2, durationMetrics.data(0).bucket_info_size());
+ ValidateDurationBucket(durationMetrics.data(0).bucket_info(0), bucketStartTimeNs,
+ bucket2StartTimeNs, conditionEndTimeNs - conditionStartTime1Ns,
+ (conditionEndTimeNs - conditionStartTime1Ns) +
+ (bucket2StartTimeNs - conditionStartTime2Ns));
+ ValidateDurationBucket(durationMetrics.data(0).bucket_info(1), bucket2StartTimeNs,
+ dumpReportTimeNs, 30 * NS_PER_SEC,
+ dumpReportTimeNs - bucket2StartTimeNs);
}
TEST(DurationMetricE2eTest, TestWithSlicedCondition) {
@@ -1649,6 +1665,106 @@
EXPECT_EQ(baseTimeNs + bucketSizeNs, data.bucket_info(0).end_bucket_elapsed_nanos());
}
+TEST(DurationMetricE2eTest, TestDimensionalSampling) {
+ ShardOffsetProvider::getInstance().setShardOffset(5);
+
+ StatsdConfig config;
+ config.add_allowed_log_source("AID_ROOT"); // LogEvent defaults to UID of root.
+
+ *config.add_atom_matcher() = CreateStartScheduledJobAtomMatcher();
+ *config.add_atom_matcher() = CreateFinishScheduledJobAtomMatcher();
+ AtomMatcher stopAllMatcher = CreateScheduleScheduledJobAtomMatcher();
+ *config.add_atom_matcher() = stopAllMatcher;
+
+ Predicate scheduledJobPredicate = CreateScheduledJobPredicate();
+ *scheduledJobPredicate.mutable_simple_predicate()->mutable_dimensions() =
+ CreateAttributionUidDimensions(util::SCHEDULED_JOB_STATE_CHANGED, {Position::FIRST});
+ SimplePredicate* simplePredicate = scheduledJobPredicate.mutable_simple_predicate();
+ simplePredicate->set_stop_all(stopAllMatcher.id());
+ *config.add_predicate() = scheduledJobPredicate;
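+    // The schedule-job atom acts as the stop-all trigger: once it is logged, every in-flight
+    // duration slice ends regardless of uid, which is exercised near the end of the timeline.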
+
+ DurationMetric sampledDurationMetric = createDurationMetric(
+ "DurationSampledScheduledJobPerUid", scheduledJobPredicate.id(), nullopt, {});
+ sampledDurationMetric.set_aggregation_type(DurationMetric::SUM);
+ *sampledDurationMetric.mutable_dimensions_in_what() =
+ CreateAttributionUidDimensions(util::SCHEDULED_JOB_STATE_CHANGED, {Position::FIRST});
+ *sampledDurationMetric.mutable_dimensional_sampling_info()->mutable_sampled_what_field() =
+ CreateAttributionUidDimensions(util::SCHEDULED_JOB_STATE_CHANGED, {Position::FIRST});
+ sampledDurationMetric.mutable_dimensional_sampling_info()->set_shard_count(2);
+ *config.add_duration_metric() = sampledDurationMetric;
+
+ const int64_t configAddedTimeNs = 1 * NS_PER_SEC; // 0:01
+ const int64_t bucketSizeNs =
+ TimeUnitToBucketSizeInMillis(config.duration_metric(0).bucket()) * 1000LL * 1000LL;
+
+ int uid = 12345;
+ int64_t cfgId = 98765;
+ ConfigKey cfgKey(uid, cfgId);
+
+ sp<StatsLogProcessor> processor = CreateStatsLogProcessor(
+ configAddedTimeNs, configAddedTimeNs, config, cfgKey, nullptr, 0, new UidMap());
+
+ int uid1 = 1001; // odd hash value
+ int uid2 = 1002; // even hash value
+ int uid3 = 1003; // odd hash value
+
+ const int64_t durationStartNs1 = 20 * NS_PER_SEC;
+ const int64_t durationStartNs2 = 40 * NS_PER_SEC;
+ const int64_t durationStartNs3 = 60 * NS_PER_SEC;
+ const int64_t durationStartNs4 = 80 * NS_PER_SEC;
+ const int64_t durationEndNs1 = 100 * NS_PER_SEC;
+ const int64_t durationEndNs2 = 110 * NS_PER_SEC;
+ const int64_t stopAllNs = 130 * NS_PER_SEC;
+ const int64_t durationEndNs3 = 150 * NS_PER_SEC;
+ const int64_t durationEndNs4 = 200 * NS_PER_SEC;
+
+ std::vector<std::unique_ptr<LogEvent>> events;
+ events.push_back(CreateStartScheduledJobEvent(durationStartNs1, {uid1}, {"App1"}, "job1"));
+ events.push_back(CreateStartScheduledJobEvent(durationStartNs2, {uid2}, {"App2"}, "job2"));
+ events.push_back(CreateStartScheduledJobEvent(durationStartNs3, {uid3}, {"App3"}, "job3"));
+ events.push_back(CreateFinishScheduledJobEvent(durationEndNs1, {uid1}, {"App1"}, "job1"));
+ events.push_back(CreateFinishScheduledJobEvent(durationEndNs2, {uid2}, {"App2"}, "job2"));
+ // This event should pass the sample check regardless of the uid.
+ events.push_back(CreateScheduleScheduledJobEvent(stopAllNs, {uid2}, {"App2"}, "job2"));
+ // These events shouldn't do anything since all jobs were stopped with the cancel event.
+ events.push_back(CreateFinishScheduledJobEvent(durationEndNs3, {uid3}, {"App3"}, "job3"));
+
+ // Send log events to StatsLogProcessor.
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get());
+ }
+
+ ConfigMetricsReportList reports;
+ vector<uint8_t> buffer;
+ processor->onDumpReport(cfgKey, configAddedTimeNs + bucketSizeNs + 1, false, true, ADB_DUMP,
+ FAST, &buffer);
+ EXPECT_TRUE(buffer.size() > 0);
+ EXPECT_TRUE(reports.ParseFromArray(&buffer[0], buffer.size()));
+ backfillDimensionPath(&reports);
+ backfillStartEndTimestamp(&reports);
+
+ ASSERT_EQ(1, reports.reports_size());
+ ASSERT_EQ(1, reports.reports(0).metrics_size());
+ EXPECT_TRUE(reports.reports(0).metrics(0).has_duration_metrics());
+ StatsLogReport::DurationMetricDataWrapper durationMetrics;
+ sortMetricDataByDimensionsValue(reports.reports(0).metrics(0).duration_metrics(),
+ &durationMetrics);
+ ASSERT_EQ(2, durationMetrics.data_size());
+
+    // Only uid1 and uid3 are logged: ((odd hash value) + (offset of 5)) % (shard count of 2) == 0
+ DurationMetricData data = durationMetrics.data(0);
+ ValidateAttributionUidDimension(data.dimensions_in_what(), util::SCHEDULED_JOB_STATE_CHANGED,
+ uid1);
+ ValidateDurationBucket(data.bucket_info(0), configAddedTimeNs, configAddedTimeNs + bucketSizeNs,
+ durationEndNs1 - durationStartNs1);
+
+ data = durationMetrics.data(1);
+ ValidateAttributionUidDimension(data.dimensions_in_what(), util::SCHEDULED_JOB_STATE_CHANGED,
+ uid3);
+ ValidateDurationBucket(data.bucket_info(0), configAddedTimeNs, configAddedTimeNs + bucketSizeNs,
+ stopAllNs - durationStartNs3);
+}
+
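[Editor's note: the following standalone sketch is illustrative only and not part of this change. The helper name and the use of the raw uid as the dimension hash are hypothetical stand-ins for statsd's internal dimension hashing; only the selection rule from the comment above, ((hash) + (shard offset)) % (shard count) == 0, is taken from the test.]

    // Illustrative sketch of the dimensional sampling check exercised above.
    #include <cstdint>
    #include <iostream>

    bool keepDimension(int64_t dimensionHash, int shardOffset, int shardCount) {
        // Keep the dimension only when it falls into shard 0.
        return (dimensionHash + shardOffset) % shardCount == 0;
    }

    int main() {
        const int shardOffset = 5;  // ShardOffsetProvider offset set by the test.
        const int shardCount = 2;   // dimensional_sampling_info.shard_count.
        for (int64_t uid : {1001, 1002, 1003}) {
            // Using the uid itself as a stand-in for the dimension hash.
            std::cout << "uid " << uid << " "
                      << (keepDimension(uid, shardOffset, shardCount) ? "kept" : "dropped")
                      << std::endl;
        }
        // Prints: 1001 kept, 1002 dropped, 1003 kept; the two dimensions the
        // assertions above expect in the report.
        return 0;
    }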
#else
GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif
diff --git a/statsd/tests/e2e/GaugeMetric_e2e_pull_test.cpp b/statsd/tests/e2e/GaugeMetric_e2e_pull_test.cpp
index 9f3ace4..474cb7a 100644
--- a/statsd/tests/e2e/GaugeMetric_e2e_pull_test.cpp
+++ b/statsd/tests/e2e/GaugeMetric_e2e_pull_test.cpp
@@ -60,24 +60,15 @@
gaugeMetric->set_bucket(FIVE_MINUTES);
gaugeMetric->set_max_pull_delay_sec(INT_MAX);
config.set_hash_strings_in_metric_report(false);
+ gaugeMetric->set_split_bucket_for_app_upgrade(true);
+ gaugeMetric->set_min_bucket_size_nanos(1000);
return config;
}
} // namespaces
-// Setup for test fixture.
-class GaugeMetricE2ePulledTest : public ::testing::Test {
- void SetUp() override {
- FlagProvider::getInstance().overrideFuncs(&isAtLeastSFuncTrue);
- }
-
- void TearDown() override {
- FlagProvider::getInstance().resetOverrides();
- }
-};
-
-TEST_F(GaugeMetricE2ePulledTest, TestRandomSamplePulledEvents) {
+TEST(GaugeMetricE2ePulledTest, TestRandomSamplePulledEvents) {
auto config = CreateStatsdConfig(GaugeMetric::RANDOM_ONE_SAMPLE);
int64_t baseTimeNs = getElapsedRealtimeNs();
int64_t configAddedTimeNs = 10 * 60 * NS_PER_SEC + baseTimeNs;
@@ -217,7 +208,129 @@
EXPECT_GT(data.bucket_info(5).atom(0).subsystem_sleep_state().time_millis(), 0);
}
-TEST_F(GaugeMetricE2ePulledTest, TestConditionChangeToTrueSamplePulledEvents) {
+TEST(GaugeMetricE2ePulledTest, TestFirstNSamplesPulledNoTrigger) {
+ StatsdConfig config = CreateStatsdConfig(GaugeMetric::FIRST_N_SAMPLES);
+ auto gaugeMetric = config.mutable_gauge_metric(0);
+ gaugeMetric->set_max_num_gauge_atoms_per_bucket(3);
+ int64_t baseTimeNs = getElapsedRealtimeNs();
+ int64_t configAddedTimeNs = 10 * 60 * NS_PER_SEC + baseTimeNs;
+ int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(config.gauge_metric(0).bucket()) * 1000000;
+
+ ConfigKey cfgKey;
+ auto processor =
+ CreateStatsLogProcessor(baseTimeNs, configAddedTimeNs, config, cfgKey,
+ SharedRefBase::make<FakeSubsystemSleepCallback>(), ATOM_TAG);
+ ASSERT_EQ(processor->mMetricsManagers.size(), 1u);
+ EXPECT_TRUE(processor->mMetricsManagers.begin()->second->isConfigValid());
+ processor->mPullerManager->ForceClearPullerCache();
+
+ // When creating the config, the gauge metric producer should register the alarm at the
+ // end of the current bucket.
+ ASSERT_EQ((size_t)1, processor->mPullerManager->mReceivers.size());
+ EXPECT_EQ(bucketSizeNs,
+ processor->mPullerManager->mReceivers.begin()->second.front().intervalNs);
+ int64_t& nextPullTimeNs =
+ processor->mPullerManager->mReceivers.begin()->second.front().nextPullTimeNs;
+
+ auto screenOffEvent =
+ CreateScreenStateChangedEvent(configAddedTimeNs + 55, android::view::DISPLAY_STATE_OFF);
+ processor->OnLogEvent(screenOffEvent.get());
+
+ auto screenOnEvent =
+ CreateScreenStateChangedEvent(configAddedTimeNs + 100, android::view::DISPLAY_STATE_ON);
+ processor->OnLogEvent(screenOnEvent.get());
+
+ screenOffEvent = CreateScreenStateChangedEvent(configAddedTimeNs + 150,
+ android::view::DISPLAY_STATE_OFF);
+ processor->OnLogEvent(screenOffEvent.get());
+
+ screenOnEvent =
+ CreateScreenStateChangedEvent(configAddedTimeNs + 200, android::view::DISPLAY_STATE_ON);
+ processor->OnLogEvent(screenOnEvent.get());
+
+ screenOffEvent = CreateScreenStateChangedEvent(configAddedTimeNs + 250,
+ android::view::DISPLAY_STATE_OFF);
+ processor->OnLogEvent(screenOffEvent.get());
+
+ screenOnEvent =
+ CreateScreenStateChangedEvent(configAddedTimeNs + 300, android::view::DISPLAY_STATE_ON);
+ processor->OnLogEvent(screenOnEvent.get());
+
+ // Not logged. max_num_gauge_atoms_per_bucket already hit.
+ screenOffEvent = CreateScreenStateChangedEvent(configAddedTimeNs + 325,
+ android::view::DISPLAY_STATE_OFF);
+ processor->OnLogEvent(screenOffEvent.get());
+
+    // Pulling alarm arrives on time and resets the sequential pulling alarm.
+ processor->informPullAlarmFired(nextPullTimeNs + 1);
+
+ screenOnEvent = CreateScreenStateChangedEvent(configAddedTimeNs + bucketSizeNs + 10,
+ android::view::DISPLAY_STATE_ON);
+ processor->OnLogEvent(screenOnEvent.get());
+
+ screenOffEvent = CreateScreenStateChangedEvent(configAddedTimeNs + bucketSizeNs + 100,
+ android::view::DISPLAY_STATE_OFF);
+ processor->OnLogEvent(screenOffEvent.get());
+
+ processor->informPullAlarmFired(nextPullTimeNs + 2);
+
+ screenOnEvent = CreateScreenStateChangedEvent(configAddedTimeNs + (3 * bucketSizeNs) + 15,
+ android::view::DISPLAY_STATE_ON);
+ processor->OnLogEvent(screenOnEvent.get());
+
+ processor->informPullAlarmFired(nextPullTimeNs + 4);
+
+ ConfigMetricsReportList reports;
+ vector<uint8_t> buffer;
+ processor->onDumpReport(cfgKey, configAddedTimeNs + (4 * bucketSizeNs) + 10, false, true,
+ ADB_DUMP, FAST, &buffer);
+ EXPECT_TRUE(buffer.size() > 0);
+ EXPECT_TRUE(reports.ParseFromArray(&buffer[0], buffer.size()));
+ backfillDimensionPath(&reports);
+ backfillStringInReport(&reports);
+ backfillStartEndTimestamp(&reports);
+ backfillAggregatedAtoms(&reports);
+ ASSERT_EQ(1, reports.reports_size());
+ ASSERT_EQ(1, reports.reports(0).metrics_size());
+ StatsLogReport::GaugeMetricDataWrapper gaugeMetrics;
+ sortMetricDataByDimensionsValue(reports.reports(0).metrics(0).gauge_metrics(), &gaugeMetrics);
+ ASSERT_GT((int)gaugeMetrics.data_size(), 1);
+
+ auto data = gaugeMetrics.data(1);
+ EXPECT_EQ(ATOM_TAG, data.dimensions_in_what().field());
+ ASSERT_EQ(1, data.dimensions_in_what().value_tuple().dimensions_value_size());
+ EXPECT_EQ(1 /* subsystem name field */,
+ data.dimensions_in_what().value_tuple().dimensions_value(0).field());
+ EXPECT_FALSE(data.dimensions_in_what().value_tuple().dimensions_value(0).value_str().empty());
+ ASSERT_EQ(3, data.bucket_info_size());
+
+ ASSERT_EQ(3, data.bucket_info(0).atom_size());
+ ASSERT_EQ(3, data.bucket_info(0).elapsed_timestamp_nanos_size());
+ ValidateGaugeBucketTimes(data.bucket_info(0),
+ /*startTimeNs=*/configAddedTimeNs,
+ /*endTimeNs=*/configAddedTimeNs + bucketSizeNs,
+ /*eventTimesNs=*/
+ {(int64_t)(configAddedTimeNs + 55), (int64_t)(configAddedTimeNs + 150),
+ (int64_t)(configAddedTimeNs + 250)});
+
+ ASSERT_EQ(2, data.bucket_info(1).atom_size());
+ ASSERT_EQ(2, data.bucket_info(1).elapsed_timestamp_nanos_size());
+ ValidateGaugeBucketTimes(data.bucket_info(1),
+ /*startTimeNs=*/configAddedTimeNs + bucketSizeNs,
+ /*endTimeNs=*/configAddedTimeNs + (2 * bucketSizeNs),
+ /*eventTimesNs=*/
+ {(int64_t)(configAddedTimeNs + bucketSizeNs + 1),
+ (int64_t)(configAddedTimeNs + bucketSizeNs + 100)});
+
+ ASSERT_EQ(1, data.bucket_info(2).atom_size());
+ ASSERT_EQ(1, data.bucket_info(2).elapsed_timestamp_nanos_size());
+ ValidateGaugeBucketTimes(
+ data.bucket_info(2), /*startTimeNs=*/configAddedTimeNs + (2 * bucketSizeNs),
+ /*endTimeNs=*/configAddedTimeNs + (3 * bucketSizeNs),
+ /*eventTimesNs=*/{(int64_t)(configAddedTimeNs + (2 * bucketSizeNs) + 2)});
+}
+
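[Editor's note: a minimal sketch, not part of this change, of the per-bucket cap the assertions above rely on; the struct and method names are hypothetical. With FIRST_N_SAMPLES every qualifying event triggers a pull, but pulled atoms are appended to the current bucket only until max_num_gauge_atoms_per_bucket is reached.]

    // Illustrative sketch only; hypothetical names, not statsd's real classes.
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct GaugeBucketSketch {
        size_t maxAtomsPerBucket;             // e.g. 3 in the test above.
        std::vector<int64_t> atomTimestamps;  // elapsed timestamps of kept atoms.

        // Returns true if the pulled atom was kept (FIRST_N_SAMPLES behaviour).
        bool onPull(int64_t elapsedTimeNs) {
            if (atomTimestamps.size() >= maxAtomsPerBucket) {
                return false;  // Bucket already holds N samples; drop the atom.
            }
            atomTimestamps.push_back(elapsedTimeNs);
            return true;
        }

        void flushBucket() { atomTimestamps.clear(); }  // Called at bucket boundaries.
    };

    int main() {
        GaugeBucketSketch bucket{/*maxAtomsPerBucket=*/3, {}};
        // Four screen-off events in one bucket: only the first three are kept,
        // mirroring the "max_num_gauge_atoms_per_bucket already hit" comment above.
        for (int64_t t : {55, 150, 250, 325}) {
            bucket.onPull(t);
        }
        return bucket.atomTimestamps.size() == 3 ? 0 : 1;
    }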
+TEST(GaugeMetricE2ePulledTest, TestConditionChangeToTrueSamplePulledEvents) {
auto config = CreateStatsdConfig(GaugeMetric::CONDITION_CHANGE_TO_TRUE);
int64_t baseTimeNs = getElapsedRealtimeNs();
int64_t configAddedTimeNs = 10 * 60 * NS_PER_SEC + baseTimeNs;
@@ -315,7 +428,7 @@
EXPECT_GT(data.bucket_info(2).atom(1).subsystem_sleep_state().time_millis(), 0);
}
-TEST_F(GaugeMetricE2ePulledTest, TestRandomSamplePulledEvent_LateAlarm) {
+TEST(GaugeMetricE2ePulledTest, TestRandomSamplePulledEvent_LateAlarm) {
auto config = CreateStatsdConfig(GaugeMetric::RANDOM_ONE_SAMPLE);
int64_t baseTimeNs = getElapsedRealtimeNs();
int64_t configAddedTimeNs = 10 * 60 * NS_PER_SEC + baseTimeNs;
@@ -413,7 +526,7 @@
EXPECT_GT(data.bucket_info(2).atom(0).subsystem_sleep_state().time_millis(), 0);
}
-TEST_F(GaugeMetricE2ePulledTest, TestRandomSamplePulledEventsWithActivation) {
+TEST(GaugeMetricE2ePulledTest, TestRandomSamplePulledEventsWithActivation) {
auto config = CreateStatsdConfig(GaugeMetric::RANDOM_ONE_SAMPLE, /*useCondition=*/false);
int64_t baseTimeNs = getElapsedRealtimeNs();
@@ -430,6 +543,8 @@
event_activation->set_atom_matcher_id(batterySaverStartMatcher.id());
event_activation->set_ttl_seconds(ttlNs / 1000000000);
+ StatsdStats::getInstance().reset();
+
ConfigKey cfgKey;
auto processor =
CreateStatsLogProcessor(baseTimeNs, configAddedTimeNs, config, cfgKey,
@@ -438,10 +553,10 @@
EXPECT_TRUE(processor->mMetricsManagers.begin()->second->isConfigValid());
processor->mPullerManager->ForceClearPullerCache();
- int startBucketNum = processor->mMetricsManagers.begin()
- ->second->mAllMetricProducers[0]
- ->getCurrentBucketNum();
- EXPECT_GT(startBucketNum, (int64_t)0);
+ const int startBucketNum = processor->mMetricsManagers.begin()
+ ->second->mAllMetricProducers[0]
+ ->getCurrentBucketNum();
+ EXPECT_EQ(startBucketNum, 2);
EXPECT_FALSE(processor->mMetricsManagers.begin()->second->mAllMetricProducers[0]->isActive());
// When creating the config, the gauge metric producer should register the alarm at the
@@ -453,13 +568,43 @@
processor->mPullerManager->mReceivers.begin()->second.front().nextPullTimeNs;
EXPECT_EQ(baseTimeNs + startBucketNum * bucketSizeNs + bucketSizeNs, nextPullTimeNs);
+ // Check no pull occurred on metric initialization when it's not active.
+ const int64_t metricInitTimeNs = configAddedTimeNs + 1; // 10 mins + 1 ns.
+ processor->onStatsdInitCompleted(metricInitTimeNs);
+ StatsdStatsReport_PulledAtomStats pulledAtomStats =
+ getPulledAtomStats(util::SUBSYSTEM_SLEEP_STATE);
+ EXPECT_EQ(pulledAtomStats.atom_id(), ATOM_TAG);
+ EXPECT_EQ(pulledAtomStats.total_pull(), 0);
+
+ // Check no pull occurred on app upgrade when metric is not active.
+ const int64_t appUpgradeTimeNs = metricInitTimeNs + 1; // 10 mins + 2 ns.
+ processor->notifyAppUpgrade(appUpgradeTimeNs, "appName", 1000 /* uid */, 2 /* version */);
+ pulledAtomStats = getPulledAtomStats(util::SUBSYSTEM_SLEEP_STATE);
+ EXPECT_EQ(pulledAtomStats.atom_id(), ATOM_TAG);
+ EXPECT_EQ(pulledAtomStats.total_pull(), 0);
+
+ // Check skipped bucket is not added when metric is not active.
+ int64_t dumpReportTimeNs = appUpgradeTimeNs + 1; // 10 mins + 3 ns.
+ vector<uint8_t> buffer;
+ processor->onDumpReport(cfgKey, dumpReportTimeNs, true /* include_current_partial_bucket */,
+ true /* erase_data */, ADB_DUMP, NO_TIME_CONSTRAINTS, &buffer);
+ ConfigMetricsReportList reports;
+ EXPECT_TRUE(buffer.size() > 0);
+ EXPECT_TRUE(reports.ParseFromArray(&buffer[0], buffer.size()));
+ ASSERT_EQ(1, reports.reports_size());
+ ASSERT_EQ(1, reports.reports(0).metrics_size());
+ StatsLogReport::GaugeMetricDataWrapper gaugeMetrics =
+ reports.reports(0).metrics(0).gauge_metrics();
+ EXPECT_EQ(gaugeMetrics.skipped_size(), 0);
+
// Pulling alarm arrives on time and reset the sequential pulling alarm.
// Event should not be kept.
processor->informPullAlarmFired(nextPullTimeNs + 1); // 15 mins + 1 ns.
EXPECT_EQ(baseTimeNs + startBucketNum * bucketSizeNs + 2 * bucketSizeNs, nextPullTimeNs);
EXPECT_FALSE(processor->mMetricsManagers.begin()->second->mAllMetricProducers[0]->isActive());
- // Activate the metric. A pull occurs upon activation.
+ // Activate the metric. A pull occurs upon activation. The event is kept. 1 total
+ // 15 mins + 2 ms
const int64_t activationNs = configAddedTimeNs + bucketSizeNs + (2 * 1000 * 1000); // 2 millis.
auto batterySaverOnEvent = CreateBatterySaverOnEvent(activationNs);
processor->OnLogEvent(batterySaverOnEvent.get()); // 15 mins + 2 ms.
@@ -474,19 +619,27 @@
EXPECT_EQ(baseTimeNs + startBucketNum * bucketSizeNs + 4 * bucketSizeNs, nextPullTimeNs);
// Create random event to deactivate metric.
- auto deactivationEvent = CreateScreenBrightnessChangedEvent(activationNs + ttlNs + 1, 50);
+ // A pull should not occur here. 3 total.
+ // 25 mins + 2 ms + 1 ns.
+ const int64_t deactivationNs = activationNs + ttlNs + 1;
+ auto deactivationEvent = CreateScreenBrightnessChangedEvent(deactivationNs, 50);
processor->OnLogEvent(deactivationEvent.get());
EXPECT_FALSE(processor->mMetricsManagers.begin()->second->mAllMetricProducers[0]->isActive());
// Event should not be kept. 3 total.
+ // 30 mins + 3 ns.
processor->informPullAlarmFired(nextPullTimeNs + 3);
EXPECT_EQ(baseTimeNs + startBucketNum * bucketSizeNs + 5 * bucketSizeNs, nextPullTimeNs);
+ // Event should not be kept. 3 total.
+ // 35 mins + 2 ns.
processor->informPullAlarmFired(nextPullTimeNs + 2);
+ EXPECT_EQ(baseTimeNs + startBucketNum * bucketSizeNs + 6 * bucketSizeNs, nextPullTimeNs);
- ConfigMetricsReportList reports;
- vector<uint8_t> buffer;
- processor->onDumpReport(cfgKey, configAddedTimeNs + 7 * bucketSizeNs + 10, false, true,
+ buffer.clear();
+ // 40 mins + 10 ns.
+ processor->onDumpReport(cfgKey, configAddedTimeNs + 6 * bucketSizeNs + 10,
+ false /* include_current_partial_bucket */, true /* erase_data */,
ADB_DUMP, FAST, &buffer);
EXPECT_TRUE(buffer.size() > 0);
EXPECT_TRUE(reports.ParseFromArray(&buffer[0], buffer.size()));
@@ -496,7 +649,7 @@
backfillAggregatedAtoms(&reports);
ASSERT_EQ(1, reports.reports_size());
ASSERT_EQ(1, reports.reports(0).metrics_size());
- StatsLogReport::GaugeMetricDataWrapper gaugeMetrics;
+ gaugeMetrics = StatsLogReport::GaugeMetricDataWrapper();
sortMetricDataByDimensionsValue(reports.reports(0).metrics(0).gauge_metrics(), &gaugeMetrics);
ASSERT_GT((int)gaugeMetrics.data_size(), 0);
@@ -535,13 +688,185 @@
ASSERT_EQ(0, bucketInfo.wall_clock_timestamp_nanos_size());
EXPECT_EQ(MillisToNano(NanoToMillis(baseTimeNs + 5 * bucketSizeNs)),
bucketInfo.start_bucket_elapsed_nanos());
- EXPECT_EQ(MillisToNano(NanoToMillis(activationNs + ttlNs + 1)),
- bucketInfo.end_bucket_elapsed_nanos());
+ EXPECT_EQ(MillisToNano(NanoToMillis(deactivationNs)), bucketInfo.end_bucket_elapsed_nanos());
EXPECT_TRUE(bucketInfo.atom(0).subsystem_sleep_state().subsystem_name().empty());
EXPECT_GT(bucketInfo.atom(0).subsystem_sleep_state().time_millis(), 0);
+
+ // Check skipped bucket is not added after deactivation.
+ dumpReportTimeNs = configAddedTimeNs + 8 * bucketSizeNs + 10;
+ buffer.clear();
+ processor->onDumpReport(cfgKey, dumpReportTimeNs, true /* include_current_partial_bucket */,
+ true /* erase_data */, ADB_DUMP, NO_TIME_CONSTRAINTS, &buffer);
+ EXPECT_TRUE(buffer.size() > 0);
+ EXPECT_TRUE(reports.ParseFromArray(&buffer[0], buffer.size()));
+ ASSERT_EQ(1, reports.reports_size());
+ ASSERT_EQ(1, reports.reports(0).metrics_size());
+ gaugeMetrics = reports.reports(0).metrics(0).gauge_metrics();
+ EXPECT_EQ(gaugeMetrics.skipped_size(), 0);
}
-TEST_F(GaugeMetricE2ePulledTest, TestRandomSamplePulledEventsNoCondition) {
+TEST(GaugeMetricE2ePulledTest, TestFirstNSamplesPulledNoTriggerWithActivation) {
+ StatsdConfig config = CreateStatsdConfig(GaugeMetric::FIRST_N_SAMPLES);
+ auto gaugeMetric = config.mutable_gauge_metric(0);
+ gaugeMetric->set_max_num_gauge_atoms_per_bucket(2);
+ int64_t baseTimeNs = getElapsedRealtimeNs();
+ int64_t configAddedTimeNs = 10 * 60 * NS_PER_SEC + baseTimeNs;
+ int64_t bucketSizeNs = TimeUnitToBucketSizeInMillis(config.gauge_metric(0).bucket()) * 1000000;
+
+ auto batterySaverStartMatcher = CreateBatterySaverModeStartAtomMatcher();
+ *config.add_atom_matcher() = batterySaverStartMatcher;
+ const int64_t ttlNs = 2 * bucketSizeNs; // Two buckets.
+ auto metric_activation = config.add_metric_activation();
+ metric_activation->set_metric_id(metricId);
+ metric_activation->set_activation_type(ACTIVATE_IMMEDIATELY);
+ auto event_activation = metric_activation->add_event_activation();
+ event_activation->set_atom_matcher_id(batterySaverStartMatcher.id());
+ event_activation->set_ttl_seconds(ttlNs / NS_PER_SEC);
+
+ StatsdStats::getInstance().reset();
+
+ ConfigKey cfgKey;
+ auto processor =
+ CreateStatsLogProcessor(baseTimeNs, configAddedTimeNs, config, cfgKey,
+ SharedRefBase::make<FakeSubsystemSleepCallback>(), ATOM_TAG);
+ ASSERT_EQ(processor->mMetricsManagers.size(), 1u);
+ processor->mPullerManager->ForceClearPullerCache();
+
+ EXPECT_FALSE(processor->mMetricsManagers.begin()->second->mAllMetricProducers[0]->isActive());
+
+ // When creating the config, the gauge metric producer should register the alarm at the
+ // end of the current bucket.
+ ASSERT_EQ((size_t)1, processor->mPullerManager->mReceivers.size());
+ EXPECT_EQ(bucketSizeNs,
+ processor->mPullerManager->mReceivers.begin()->second.front().intervalNs);
+ int64_t& nextPullTimeNs =
+ processor->mPullerManager->mReceivers.begin()->second.front().nextPullTimeNs;
+
+ // Condition true but Active false
+ auto screenOffEvent =
+ CreateScreenStateChangedEvent(configAddedTimeNs + 55, android::view::DISPLAY_STATE_OFF);
+ processor->OnLogEvent(screenOffEvent.get());
+
+ auto screenOnEvent =
+ CreateScreenStateChangedEvent(configAddedTimeNs + 100, android::view::DISPLAY_STATE_ON);
+ processor->OnLogEvent(screenOnEvent.get());
+
+    // Pulling alarm arrives on time and resets the sequential pulling alarm.
+ // Event should not be kept.
+ processor->informPullAlarmFired(nextPullTimeNs + 1); // 15 mins + 1 ns.
+ EXPECT_FALSE(processor->mMetricsManagers.begin()->second->mAllMetricProducers[0]->isActive());
+
+ // Activate the metric. A pull occurs upon activation. The event is not kept. 0 total
+ // 15 mins + 1000 ns.
+ const int64_t activationNs = configAddedTimeNs + bucketSizeNs + 1000;
+ auto batterySaverOnEvent = CreateBatterySaverOnEvent(activationNs);
+ processor->OnLogEvent(batterySaverOnEvent.get()); // 15 mins + 1000 ns.
+ EXPECT_TRUE(processor->mMetricsManagers.begin()->second->mAllMetricProducers[0]->isActive());
+
+ // A pull occurs upon condition change. The event is kept. 1 total. 1 in bucket
+ screenOffEvent = CreateScreenStateChangedEvent(configAddedTimeNs + bucketSizeNs + 150,
+ android::view::DISPLAY_STATE_OFF);
+ processor->OnLogEvent(screenOffEvent.get());
+
+ screenOnEvent = CreateScreenStateChangedEvent(configAddedTimeNs + bucketSizeNs + 200,
+ android::view::DISPLAY_STATE_ON);
+ processor->OnLogEvent(screenOnEvent.get());
+
+ // A pull occurs upon condition change. The event is kept. 1 total. 2 in bucket
+ screenOffEvent = CreateScreenStateChangedEvent(configAddedTimeNs + bucketSizeNs + 250,
+ android::view::DISPLAY_STATE_OFF);
+ processor->OnLogEvent(screenOffEvent.get());
+
+ screenOnEvent = CreateScreenStateChangedEvent(configAddedTimeNs + bucketSizeNs + 300,
+ android::view::DISPLAY_STATE_ON);
+ processor->OnLogEvent(screenOnEvent.get());
+
+ // A pull occurs upon condition change. The event is not kept due to
+ // max_num_gauge_atoms_per_bucket. 1 total. 2 total in bucket
+ screenOffEvent = CreateScreenStateChangedEvent(configAddedTimeNs + bucketSizeNs + 325,
+ android::view::DISPLAY_STATE_OFF);
+ processor->OnLogEvent(screenOffEvent.get());
+
+ screenOnEvent = CreateScreenStateChangedEvent(configAddedTimeNs + bucketSizeNs + 375,
+ android::view::DISPLAY_STATE_ON);
+ processor->OnLogEvent(screenOnEvent.get());
+ // Condition false but Active true
+
+ // This event should not be kept. 1 total.
+ processor->informPullAlarmFired(nextPullTimeNs + 1); // 20 mins + 1 ns.
+
+ // This event should not be kept. 1 total.
+ processor->informPullAlarmFired(nextPullTimeNs + 2); // 25 mins + 2 ns.
+
+ // A pull occurs upon condition change. The event is kept. 2 total. 1 in bucket
+ screenOffEvent = CreateScreenStateChangedEvent(configAddedTimeNs + 3 * bucketSizeNs + 50,
+ android::view::DISPLAY_STATE_OFF);
+ processor->OnLogEvent(screenOffEvent.get());
+ // Condition true but Active true
+
+ // Create random event to deactivate metric.
+ // A pull should not occur here. 2 total. 1 in bucket.
+ // 25 mins + 1000 ns + 1 ns.
+ const int64_t deactivationNs = activationNs + ttlNs + 1;
+ auto deactivationEvent = CreateScreenBrightnessChangedEvent(deactivationNs, 50);
+ processor->OnLogEvent(deactivationEvent.get());
+ EXPECT_FALSE(processor->mMetricsManagers.begin()->second->mAllMetricProducers[0]->isActive());
+ // Condition true but Active false
+
+ screenOnEvent = CreateScreenStateChangedEvent(configAddedTimeNs + 3 * bucketSizeNs + 50,
+ android::view::DISPLAY_STATE_ON);
+ processor->OnLogEvent(screenOnEvent.get());
+
+ screenOffEvent = CreateScreenStateChangedEvent(configAddedTimeNs + 3 * bucketSizeNs + 100,
+ android::view::DISPLAY_STATE_OFF);
+ processor->OnLogEvent(screenOffEvent.get());
+
+ vector<uint8_t> buffer;
+ // 30 mins + 10 ns.
+ processor->onDumpReport(cfgKey, configAddedTimeNs + 4 * bucketSizeNs + 10,
+ false /* include_current_partial_bucket */, true /* erase_data */,
+ ADB_DUMP, FAST, &buffer);
+ ConfigMetricsReportList reports;
+ EXPECT_TRUE(buffer.size() > 0);
+ EXPECT_TRUE(reports.ParseFromArray(&buffer[0], buffer.size()));
+ backfillDimensionPath(&reports);
+ backfillStringInReport(&reports);
+ backfillStartEndTimestamp(&reports);
+ backfillAggregatedAtoms(&reports);
+ ASSERT_EQ(1, reports.reports_size());
+ ASSERT_EQ(1, reports.reports(0).metrics_size());
+ StatsLogReport::GaugeMetricDataWrapper gaugeMetrics = StatsLogReport::GaugeMetricDataWrapper();
+ sortMetricDataByDimensionsValue(reports.reports(0).metrics(0).gauge_metrics(), &gaugeMetrics);
+ ASSERT_GT((int)gaugeMetrics.data_size(), 0);
+
+ auto data = gaugeMetrics.data(0);
+ EXPECT_EQ(ATOM_TAG, data.dimensions_in_what().field());
+ ASSERT_EQ(1, data.dimensions_in_what().value_tuple().dimensions_value_size());
+ EXPECT_EQ(1 /* subsystem name field */,
+ data.dimensions_in_what().value_tuple().dimensions_value(0).field());
+ EXPECT_FALSE(data.dimensions_in_what().value_tuple().dimensions_value(0).value_str().empty());
+ ASSERT_EQ(2, data.bucket_info_size());
+
+ ASSERT_EQ(2, data.bucket_info(0).atom_size());
+ ASSERT_EQ(2, data.bucket_info(0).elapsed_timestamp_nanos_size());
+ ValidateGaugeBucketTimes(data.bucket_info(0),
+ /*startTimeNs=*/configAddedTimeNs + bucketSizeNs,
+ /*endTimeNs=*/configAddedTimeNs + (2 * bucketSizeNs),
+ /*eventTimesNs=*/
+ {(int64_t)(configAddedTimeNs + bucketSizeNs + 150),
+ (int64_t)(configAddedTimeNs + bucketSizeNs + 250)});
+
+ ASSERT_EQ(1, data.bucket_info(1).atom_size());
+ ASSERT_EQ(1, data.bucket_info(1).elapsed_timestamp_nanos_size());
+ ValidateGaugeBucketTimes(data.bucket_info(1),
+ /*startTimeNs=*/
+ MillisToNano(NanoToMillis(configAddedTimeNs + (3 * bucketSizeNs))),
+ /*endTimeNs=*/MillisToNano(NanoToMillis(deactivationNs)),
+ /*eventTimesNs=*/
+ {(int64_t)(configAddedTimeNs + (3 * bucketSizeNs) + 50)});
+}
+
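[Editor's note: a rough sketch, not part of this change, of the activation window the two activation tests above depend on; the type and member names are hypothetical. An ACTIVATE_IMMEDIATELY event activation keeps the metric active for ttl_seconds after the activating atom, so an event processed after activationNs + ttlNs finds the metric inactive again.]

    // Illustrative sketch only; hypothetical names, not statsd's real classes.
    #include <cstdint>

    constexpr int64_t kNsPerSec = 1000000000LL;

    struct EventActivationSketch {
        int64_t ttlNs = 0;
        int64_t activationTimeNs = -1;  // -1 means "never activated".

        void onActivatingAtom(int64_t elapsedTimeNs) { activationTimeNs = elapsedTimeNs; }

        bool isActive(int64_t nowNs) const {
            return activationTimeNs >= 0 && nowNs < activationTimeNs + ttlNs;
        }
    };

    int main() {
        const int64_t bucketSizeNs = 5 * 60 * kNsPerSec;  // FIVE_MINUTES buckets.
        EventActivationSketch activation{/*ttlNs=*/2 * bucketSizeNs};  // Two buckets, as above.
        const int64_t activationNs = 1000;  // Time of the battery saver event (relative).
        activation.onActivatingAtom(activationNs);
        // Active shortly before the TTL elapses, inactive shortly after it:
        // the point at which the tests expect isActive() to be false again.
        const bool stillActive = activation.isActive(activationNs + 2 * bucketSizeNs - 1);
        const bool expired = !activation.isActive(activationNs + 2 * bucketSizeNs + 1);
        return (stillActive && expired) ? 0 : 1;
    }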
+TEST(GaugeMetricE2ePulledTest, TestRandomSamplePulledEventsNoCondition) {
auto config = CreateStatsdConfig(GaugeMetric::RANDOM_ONE_SAMPLE, /*useCondition=*/false);
int64_t baseTimeNs = getElapsedRealtimeNs();
diff --git a/statsd/tests/e2e/GaugeMetric_e2e_push_test.cpp b/statsd/tests/e2e/GaugeMetric_e2e_push_test.cpp
index f5a0158..81192b2 100644
--- a/statsd/tests/e2e/GaugeMetric_e2e_push_test.cpp
+++ b/statsd/tests/e2e/GaugeMetric_e2e_push_test.cpp
@@ -414,6 +414,93 @@
}
}
+TEST_F(GaugeMetricE2ePushedTest, TestDimensionalSampling) {
+ ShardOffsetProvider::getInstance().setShardOffset(5);
+
+ StatsdConfig config;
+ config.add_allowed_log_source("AID_ROOT"); // LogEvent defaults to UID of root.
+
+ AtomMatcher appCrashMatcher =
+ CreateSimpleAtomMatcher("APP_CRASH_OCCURRED", util::APP_CRASH_OCCURRED);
+ *config.add_atom_matcher() = appCrashMatcher;
+
+ GaugeMetric sampledGaugeMetric =
+ createGaugeMetric("GaugeSampledAppCrashesPerUid", appCrashMatcher.id(),
+ GaugeMetric::FIRST_N_SAMPLES, nullopt, nullopt);
+ *sampledGaugeMetric.mutable_dimensions_in_what() =
+ CreateDimensions(util::APP_CRASH_OCCURRED, {1 /* uid */});
+ *sampledGaugeMetric.mutable_dimensional_sampling_info()->mutable_sampled_what_field() =
+ CreateDimensions(util::APP_CRASH_OCCURRED, {1 /*uid*/});
+ sampledGaugeMetric.mutable_dimensional_sampling_info()->set_shard_count(2);
+ *config.add_gauge_metric() = sampledGaugeMetric;
+
+ const int64_t configAddedTimeNs = 10 * NS_PER_SEC; // 0:10
+ const int64_t bucketSizeNs =
+ TimeUnitToBucketSizeInMillis(config.gauge_metric(0).bucket()) * 1000LL * 1000LL;
+
+ int uid = 12345;
+ int64_t cfgId = 98765;
+ ConfigKey cfgKey(uid, cfgId);
+
+ sp<StatsLogProcessor> processor = CreateStatsLogProcessor(
+ configAddedTimeNs, configAddedTimeNs, config, cfgKey, nullptr, 0, new UidMap());
+
+ int appUid1 = 1001; // odd hash value
+ int appUid2 = 1002; // even hash value
+ int appUid3 = 1003; // odd hash value
+
+ const int64_t gaugeEventTimeNs1 = configAddedTimeNs + 20 * NS_PER_SEC;
+ const int64_t gaugeEventTimeNs2 = configAddedTimeNs + 40 * NS_PER_SEC;
+ const int64_t gaugeEventTimeNs3 = configAddedTimeNs + 60 * NS_PER_SEC;
+ const int64_t gaugeEventTimeNs4 = configAddedTimeNs + 100 * NS_PER_SEC;
+ const int64_t gaugeEventTimeNs5 = configAddedTimeNs + 110 * NS_PER_SEC;
+ const int64_t gaugeEventTimeNs6 = configAddedTimeNs + 150 * NS_PER_SEC;
+
+ std::vector<std::unique_ptr<LogEvent>> events;
+ events.push_back(CreateAppCrashOccurredEvent(gaugeEventTimeNs1, appUid1)); // 0:30
+ events.push_back(CreateAppCrashOccurredEvent(gaugeEventTimeNs2, appUid2)); // 0:50
+ events.push_back(CreateAppCrashOccurredEvent(gaugeEventTimeNs3, appUid3)); // 1:10
+ events.push_back(CreateAppCrashOccurredEvent(gaugeEventTimeNs4, appUid1)); // 1:50
+ events.push_back(CreateAppCrashOccurredEvent(gaugeEventTimeNs5, appUid2)); // 2:00
+ events.push_back(CreateAppCrashOccurredEvent(gaugeEventTimeNs6, appUid3)); // 2:40
+
+ // Send log events to StatsLogProcessor.
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get());
+ }
+
+ ConfigMetricsReportList reports;
+ vector<uint8_t> buffer;
+ processor->onDumpReport(cfgKey, configAddedTimeNs + bucketSizeNs + 1, false, true, ADB_DUMP,
+ FAST, &buffer);
+ EXPECT_TRUE(buffer.size() > 0);
+ EXPECT_TRUE(reports.ParseFromArray(&buffer[0], buffer.size()));
+ backfillDimensionPath(&reports);
+ backfillStringInReport(&reports);
+ backfillStartEndTimestamp(&reports);
+ backfillAggregatedAtoms(&reports);
+
+ ASSERT_EQ(1, reports.reports_size());
+ ASSERT_EQ(1, reports.reports(0).metrics_size());
+ EXPECT_TRUE(reports.reports(0).metrics(0).has_gauge_metrics());
+ StatsLogReport::GaugeMetricDataWrapper gaugeMetrics;
+ sortMetricDataByDimensionsValue(reports.reports(0).metrics(0).gauge_metrics(), &gaugeMetrics);
+ ASSERT_EQ(2, gaugeMetrics.data_size());
+
+    // Only appUid1 and appUid3 are logged: ((odd hash value) + (offset of 5)) % (shard count of 2) == 0
+ GaugeMetricData data = gaugeMetrics.data(0);
+ ValidateUidDimension(data.dimensions_in_what(), util::APP_CRASH_OCCURRED, appUid1);
+ ValidateGaugeBucketTimes(data.bucket_info(0), configAddedTimeNs,
+ configAddedTimeNs + bucketSizeNs,
+ {gaugeEventTimeNs1, gaugeEventTimeNs4});
+
+ data = gaugeMetrics.data(1);
+ ValidateUidDimension(data.dimensions_in_what(), util::APP_CRASH_OCCURRED, appUid3);
+ ValidateGaugeBucketTimes(data.bucket_info(0), configAddedTimeNs,
+ configAddedTimeNs + bucketSizeNs,
+ {gaugeEventTimeNs3, gaugeEventTimeNs6});
+}
+
#else
GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif
diff --git a/statsd/tests/e2e/KllMetric_e2e_test.cpp b/statsd/tests/e2e/KllMetric_e2e_test.cpp
index b62a133..a6100c2 100644
--- a/statsd/tests/e2e/KllMetric_e2e_test.cpp
+++ b/statsd/tests/e2e/KllMetric_e2e_test.cpp
@@ -25,6 +25,21 @@
using namespace std;
+namespace {
+
+unique_ptr<LogEvent> CreateTestAtomReportedEvent(const uint64_t timestampNs, const long longField,
+ const string& stringField) {
+ return CreateTestAtomReportedEvent(
+ timestampNs, /* attributionUids */ {1001},
+ /* attributionTags */ {"app1"}, /* intField */ 0, longField, /* floatField */ 0.0f,
+ stringField, /* boolField */ false, TestAtomReported::OFF, /* bytesField */ {},
+ /* repeatedIntField */ {}, /* repeatedLongField */ {}, /* repeatedFloatField */ {},
+ /* repeatedStringField */ {}, /* repeatedBoolField */ {},
+ /* repeatedBoolFieldLength */ 0, /* repeatedEnumField */ {});
+}
+
+} // anonymous namespace.
+
class KllMetricE2eTest : public ::testing::Test {
protected:
void SetUp() override {
@@ -67,7 +82,8 @@
uint64_t dumpTimeNs = bucketStartTimeNs + bucketSizeNs;
ConfigMetricsReportList reports;
vector<uint8_t> buffer;
- processor->onDumpReport(key, dumpTimeNs, true, true, ADB_DUMP, FAST, &buffer);
+ processor->onDumpReport(key, dumpTimeNs, /*include_current_bucket*/ false, true, ADB_DUMP, FAST,
+ &buffer);
EXPECT_TRUE(reports.ParseFromArray(&buffer[0], buffer.size()));
backfillDimensionPath(&reports);
backfillStringInReport(&reports);
@@ -89,6 +105,75 @@
EXPECT_EQ(metricReport.kll_metrics().skipped_size(), 0);
}
+TEST_F(KllMetricE2eTest, TestMetricWithDimensions) {
+ whatMatcher = CreateSimpleAtomMatcher("TestAtomReported", util::TEST_ATOM_REPORTED);
+ metric = createKllMetric("TestAtomMetric", whatMatcher, /* kllField */ 3,
+ /* condition */ nullopt);
+
+ *metric.mutable_dimensions_in_what() =
+ CreateDimensions(util::TEST_ATOM_REPORTED, {5 /* string_field */});
+
+ config.clear_atom_matcher();
+ *config.add_atom_matcher() = whatMatcher;
+
+ config.clear_kll_metric();
+ *config.add_kll_metric() = metric;
+
+ events.clear();
+ events.push_back(CreateTestAtomReportedEvent(bucketStartTimeNs + 5 * NS_PER_SEC, 5l, "dim_1"));
+ events.push_back(CreateTestAtomReportedEvent(bucketStartTimeNs + 15 * NS_PER_SEC, 6l, "dim_2"));
+ events.push_back(CreateTestAtomReportedEvent(bucketStartTimeNs + 25 * NS_PER_SEC, 7l, "dim_1"));
+
+ const sp<StatsLogProcessor> processor =
+ CreateStatsLogProcessor(bucketStartTimeNs, bucketStartTimeNs, config, key);
+
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get());
+ }
+
+ uint64_t dumpTimeNs = bucketStartTimeNs + bucketSizeNs;
+ ConfigMetricsReportList reports;
+ vector<uint8_t> buffer;
+ processor->onDumpReport(key, dumpTimeNs, /*include_current_bucket*/ false, true, ADB_DUMP, FAST,
+ &buffer);
+ EXPECT_TRUE(reports.ParseFromArray(&buffer[0], buffer.size()));
+ backfillDimensionPath(&reports);
+ backfillStringInReport(&reports);
+ backfillStartEndTimestamp(&reports);
+ ASSERT_EQ(reports.reports_size(), 1);
+
+ ConfigMetricsReport report = reports.reports(0);
+ ASSERT_EQ(report.metrics_size(), 1);
+ StatsLogReport metricReport = report.metrics(0);
+ EXPECT_EQ(metricReport.metric_id(), metric.id());
+ EXPECT_TRUE(metricReport.has_kll_metrics());
+ ASSERT_EQ(metricReport.kll_metrics().data_size(), 2);
+
+ KllMetricData data = metricReport.kll_metrics().data(0);
+ ASSERT_EQ(data.bucket_info_size(), 1);
+ KllBucketInfo bucket = data.bucket_info(0);
+ EXPECT_EQ(bucket.start_bucket_elapsed_nanos(), bucketStartTimeNs);
+ EXPECT_EQ(bucket.end_bucket_elapsed_nanos(), bucketStartTimeNs + bucketSizeNs);
+ EXPECT_EQ(bucket.sketches_size(), 1);
+ EXPECT_EQ(metricReport.kll_metrics().skipped_size(), 0);
+ EXPECT_EQ(data.dimensions_in_what().field(), util::TEST_ATOM_REPORTED);
+ ASSERT_EQ(data.dimensions_in_what().value_tuple().dimensions_value_size(), 1);
+ EXPECT_EQ(data.dimensions_in_what().value_tuple().dimensions_value(0).field(), 5);
+ EXPECT_EQ(data.dimensions_in_what().value_tuple().dimensions_value(0).value_str(), "dim_1");
+
+ data = metricReport.kll_metrics().data(1);
+ ASSERT_EQ(data.bucket_info_size(), 1);
+ bucket = data.bucket_info(0);
+ EXPECT_EQ(bucket.start_bucket_elapsed_nanos(), bucketStartTimeNs);
+ EXPECT_EQ(bucket.end_bucket_elapsed_nanos(), bucketStartTimeNs + bucketSizeNs);
+ EXPECT_EQ(bucket.sketches_size(), 1);
+ EXPECT_EQ(metricReport.kll_metrics().skipped_size(), 0);
+ EXPECT_EQ(data.dimensions_in_what().field(), util::TEST_ATOM_REPORTED);
+ ASSERT_EQ(data.dimensions_in_what().value_tuple().dimensions_value_size(), 1);
+ EXPECT_EQ(data.dimensions_in_what().value_tuple().dimensions_value(0).field(), 5);
+ EXPECT_EQ(data.dimensions_in_what().value_tuple().dimensions_value(0).value_str(), "dim_2");
+}
+
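[Editor's note: a small standalone sketch, not part of this change, of the grouping the two data entries above verify; the KLL aggregation itself is omitted and the container choice is illustrative. Events are grouped per distinct value of the dimension field, so "dim_1" and "dim_2" yield two KllMetricData entries.]

    // Illustrative sketch only: group long values by a string dimension,
    // standing in for dimensions_in_what on TEST_ATOM_REPORTED's string_field.
    #include <cstdint>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    int main() {
        // (string_field, long_field) pairs mirroring the three events above.
        std::vector<std::pair<std::string, int64_t>> events = {
                {"dim_1", 5}, {"dim_2", 6}, {"dim_1", 7}};

        std::map<std::string, std::vector<int64_t>> byDimension;
        for (const auto& [dim, value] : events) {
            byDimension[dim].push_back(value);  // Values a KLL sketch would ingest.
        }
        // Two dimension keys -> two KllMetricData entries in the report.
        return byDimension.size() == 2 ? 0 : 1;
    }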
TEST_F(KllMetricE2eTest, TestInitWithKllFieldPositionALL) {
// Create config.
StatsdConfig config;
@@ -119,6 +204,97 @@
ASSERT_EQ(0, processor->mMetricsManagers.size());
}
+TEST_F(KllMetricE2eTest, TestDimensionalSampling) {
+ ShardOffsetProvider::getInstance().setShardOffset(5);
+
+ // Create config.
+ StatsdConfig config;
+ config.add_allowed_log_source("AID_ROOT"); // LogEvent defaults to UID of root.
+
+ AtomMatcher bleScanResultReceivedMatcher = CreateSimpleAtomMatcher(
+ "BleScanResultReceivedAtomMatcher", util::BLE_SCAN_RESULT_RECEIVED);
+ *config.add_atom_matcher() = bleScanResultReceivedMatcher;
+
+ // Create kll metric.
+ KllMetric sampledKllMetric =
+ createKllMetric("KllSampledBleScanResultsPerUid", bleScanResultReceivedMatcher,
+ /*num_results=*/2, nullopt);
+ *sampledKllMetric.mutable_dimensions_in_what() =
+ CreateAttributionUidDimensions(util::BLE_SCAN_RESULT_RECEIVED, {Position::FIRST});
+ *sampledKllMetric.mutable_dimensional_sampling_info()->mutable_sampled_what_field() =
+ CreateAttributionUidDimensions(util::BLE_SCAN_RESULT_RECEIVED, {Position::FIRST});
+ sampledKllMetric.mutable_dimensional_sampling_info()->set_shard_count(2);
+ *config.add_kll_metric() = sampledKllMetric;
+
+ // Initialize StatsLogProcessor.
+ const uint64_t bucketStartTimeNs = 10000000000; // 0:10
+ int uid = 12345;
+ int64_t cfgId = 98765;
+ ConfigKey cfgKey(uid, cfgId);
+
+ sp<StatsLogProcessor> processor = CreateStatsLogProcessor(
+ bucketStartTimeNs, bucketStartTimeNs, config, cfgKey, nullptr, 0, new UidMap());
+
+ int appUid1 = 1001; // odd hash value
+ int appUid2 = 1002; // even hash value
+ int appUid3 = 1003; // odd hash value
+ std::vector<std::unique_ptr<LogEvent>> events;
+
+ events.push_back(CreateBleScanResultReceivedEvent(bucketStartTimeNs + 20 * NS_PER_SEC,
+ {appUid1}, {"tag1"}, 10));
+ events.push_back(CreateBleScanResultReceivedEvent(bucketStartTimeNs + 40 * NS_PER_SEC,
+ {appUid2}, {"tag2"}, 10));
+ events.push_back(CreateBleScanResultReceivedEvent(bucketStartTimeNs + 60 * NS_PER_SEC,
+ {appUid3}, {"tag3"}, 10));
+
+ events.push_back(CreateBleScanResultReceivedEvent(bucketStartTimeNs + 120 * NS_PER_SEC,
+ {appUid1}, {"tag1"}, 11));
+ events.push_back(CreateBleScanResultReceivedEvent(bucketStartTimeNs + 140 * NS_PER_SEC,
+ {appUid2}, {"tag2"}, 12));
+ events.push_back(CreateBleScanResultReceivedEvent(bucketStartTimeNs + 160 * NS_PER_SEC,
+ {appUid3}, {"tag3"}, 13));
+
+ // Send log events to StatsLogProcessor.
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get());
+ }
+
+ // Check dump report.
+ vector<uint8_t> buffer;
+ ConfigMetricsReportList reports;
+ processor->onDumpReport(cfgKey, bucketStartTimeNs + bucketSizeNs + 1, false, true, ADB_DUMP,
+ FAST, &buffer);
+ ASSERT_GT(buffer.size(), 0);
+ EXPECT_TRUE(reports.ParseFromArray(&buffer[0], buffer.size()));
+ backfillDimensionPath(&reports);
+ backfillStringInReport(&reports);
+ backfillStartEndTimestamp(&reports);
+ backfillAggregatedAtoms(&reports);
+
+ ConfigMetricsReport report = reports.reports(0);
+ ASSERT_EQ(report.metrics_size(), 1);
+ StatsLogReport metricReport = report.metrics(0);
+ EXPECT_EQ(metricReport.metric_id(), sampledKllMetric.id());
+ EXPECT_TRUE(metricReport.has_kll_metrics());
+ StatsLogReport::KllMetricDataWrapper kllMetrics;
+ sortMetricDataByDimensionsValue(metricReport.kll_metrics(), &kllMetrics);
+ ASSERT_EQ(kllMetrics.data_size(), 2);
+ EXPECT_EQ(kllMetrics.skipped_size(), 0);
+
+    // Only appUid1 and appUid3 are logged: ((odd hash value) + (offset of 5)) % (shard count of 2) == 0
+ KllMetricData data = kllMetrics.data(0);
+ ValidateAttributionUidDimension(data.dimensions_in_what(), util::BLE_SCAN_RESULT_RECEIVED,
+ appUid1);
+ ValidateKllBucket(data.bucket_info(0), bucketStartTimeNs, bucketStartTimeNs + bucketSizeNs, {2},
+ 0);
+
+ data = kllMetrics.data(1);
+ ValidateAttributionUidDimension(data.dimensions_in_what(), util::BLE_SCAN_RESULT_RECEIVED,
+ appUid3);
+ ValidateKllBucket(data.bucket_info(0), bucketStartTimeNs, bucketStartTimeNs + bucketSizeNs, {2},
+ 0);
+}
+
#else
GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif
diff --git a/statsd/tests/e2e/MetricActivation_e2e_test.cpp b/statsd/tests/e2e/MetricActivation_e2e_test.cpp
index e320419..3c625ac 100644
--- a/statsd/tests/e2e/MetricActivation_e2e_test.cpp
+++ b/statsd/tests/e2e/MetricActivation_e2e_test.cpp
@@ -252,6 +252,9 @@
long timeBase1 = 1;
int broadcastCount = 0;
+ std::shared_ptr<MockLogEventFilter> mockLogEventFilter = std::make_shared<MockLogEventFilter>();
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(StatsLogProcessor::getDefaultAtomIdSet(), _))
+ .Times(1);
StatsLogProcessor processor(
m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, bucketStartTimeNs,
[](const ConfigKey& key) { return true; },
@@ -263,7 +266,11 @@
activeConfigsBroadcast.insert(activeConfigsBroadcast.end(), activeConfigs.begin(),
activeConfigs.end());
return true;
- });
+ },
+ [](const ConfigKey&, const string&, const vector<int64_t>&) {}, mockLogEventFilter);
+
+ const LogEventFilter::AtomIdSet atomIdsList = CreateAtomIdSetFromConfig(config);
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(atomIdsList, &processor)).Times(1);
processor.OnConfigUpdated(bucketStartTimeNs, cfgKey, config);
@@ -463,6 +470,9 @@
long timeBase1 = 1;
int broadcastCount = 0;
+ std::shared_ptr<MockLogEventFilter> mockLogEventFilter = std::make_shared<MockLogEventFilter>();
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(StatsLogProcessor::getDefaultAtomIdSet(), _))
+ .Times(1);
StatsLogProcessor processor(
m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, bucketStartTimeNs,
[](const ConfigKey& key) { return true; },
@@ -474,7 +484,11 @@
activeConfigsBroadcast.insert(activeConfigsBroadcast.end(), activeConfigs.begin(),
activeConfigs.end());
return true;
- });
+ },
+ [](const ConfigKey&, const string&, const vector<int64_t>&) {}, mockLogEventFilter);
+
+ const LogEventFilter::AtomIdSet atomIdsList = CreateAtomIdSetFromConfig(config);
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(atomIdsList, &processor)).Times(1);
processor.OnConfigUpdated(bucketStartTimeNs, cfgKey, config);
@@ -784,6 +798,9 @@
long timeBase1 = 1;
int broadcastCount = 0;
+ std::shared_ptr<MockLogEventFilter> mockLogEventFilter = std::make_shared<MockLogEventFilter>();
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(StatsLogProcessor::getDefaultAtomIdSet(), _))
+ .Times(1);
StatsLogProcessor processor(
m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, bucketStartTimeNs,
[](const ConfigKey& key) { return true; },
@@ -795,7 +812,11 @@
activeConfigsBroadcast.insert(activeConfigsBroadcast.end(), activeConfigs.begin(),
activeConfigs.end());
return true;
- });
+ },
+ [](const ConfigKey&, const string&, const vector<int64_t>&) {}, mockLogEventFilter);
+
+ const LogEventFilter::AtomIdSet atomIdsList = CreateAtomIdSetFromConfig(config);
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(atomIdsList, &processor)).Times(1);
processor.OnConfigUpdated(bucketStartTimeNs, cfgKey, config);
@@ -1117,6 +1138,9 @@
long timeBase1 = 1;
int broadcastCount = 0;
+ std::shared_ptr<MockLogEventFilter> mockLogEventFilter = std::make_shared<MockLogEventFilter>();
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(StatsLogProcessor::getDefaultAtomIdSet(), _))
+ .Times(1);
StatsLogProcessor processor(
m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, bucketStartTimeNs,
[](const ConfigKey& key) { return true; },
@@ -1128,7 +1152,11 @@
activeConfigsBroadcast.insert(activeConfigsBroadcast.end(), activeConfigs.begin(),
activeConfigs.end());
return true;
- });
+ },
+ [](const ConfigKey&, const string&, const vector<int64_t>&) {}, mockLogEventFilter);
+
+ const LogEventFilter::AtomIdSet atomIdsList = CreateAtomIdSetFromConfig(config);
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(atomIdsList, &processor)).Times(1);
processor.OnConfigUpdated(bucketStartTimeNs, cfgKey, config);
@@ -1314,6 +1342,9 @@
long timeBase1 = 1;
int broadcastCount = 0;
+ std::shared_ptr<MockLogEventFilter> mockLogEventFilter = std::make_shared<MockLogEventFilter>();
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(StatsLogProcessor::getDefaultAtomIdSet(), _))
+ .Times(1);
StatsLogProcessor processor(
m, pullerManager, anomalyAlarmMonitor, subscriberAlarmMonitor, bucketStartTimeNs,
[](const ConfigKey& key) { return true; },
@@ -1325,7 +1356,11 @@
activeConfigsBroadcast.insert(activeConfigsBroadcast.end(), activeConfigs.begin(),
activeConfigs.end());
return true;
- });
+ },
+ [](const ConfigKey&, const string&, const vector<int64_t>&) {}, mockLogEventFilter);
+
+ const LogEventFilter::AtomIdSet atomIdsList = CreateAtomIdSetFromConfig(config);
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(atomIdsList, &processor)).Times(1);
processor.OnConfigUpdated(bucketStartTimeNs, cfgKey, config);
diff --git a/statsd/tests/e2e/PartialBucket_e2e_test.cpp b/statsd/tests/e2e/PartialBucket_e2e_test.cpp
index fe2bdcf..46d0d5d 100644
--- a/statsd/tests/e2e/PartialBucket_e2e_test.cpp
+++ b/statsd/tests/e2e/PartialBucket_e2e_test.cpp
@@ -20,10 +20,11 @@
#include "src/StatsLogProcessor.h"
#include "src/StatsService.h"
+#include "src/packages/UidMap.h"
#include "src/stats_log_util.h"
#include "tests/statsd_test_util.h"
-using::ndk::SharedRefBase;
+using ::ndk::SharedRefBase;
using std::shared_ptr;
namespace android {
@@ -33,27 +34,6 @@
#ifdef __ANDROID__
namespace {
const string kApp1 = "app1.sharing.1";
-const int kConfigKey = 789130123; // Randomly chosen to avoid collisions with existing configs.
-const int kCallingUid = 0; // Randomly chosen
-
-void SendConfig(shared_ptr<StatsService>& service, const StatsdConfig& config) {
- string str;
- config.SerializeToString(&str);
- std::vector<uint8_t> configAsVec(str.begin(), str.end());
- service->addConfiguration(kConfigKey, configAsVec, kCallingUid);
-}
-
-ConfigMetricsReport GetReports(sp<StatsLogProcessor> processor, int64_t timestamp,
- bool include_current = false) {
- vector<uint8_t> output;
- ConfigKey configKey(AIBinder_getCallingUid(), kConfigKey);
- processor->onDumpReport(configKey, timestamp, include_current /* include_current_bucket*/,
- true /* erase_data */, ADB_DUMP, NO_TIME_CONSTRAINTS, &output);
- ConfigMetricsReportList reports;
- reports.ParseFromArray(output.data(), output.size());
- EXPECT_EQ(1, reports.reports_size());
- return reports.reports(kCallingUid);
-}
StatsdConfig MakeCountMetricConfig(const std::optional<bool> splitBucket) {
StatsdConfig config;
@@ -121,24 +101,25 @@
}
} // anonymous namespace
-TEST(PartialBucketE2eTest, TestCountMetricWithoutSplit) {
- shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
- SendConfig(service, MakeCountMetricConfig({true}));
+// Setup for test fixture.
+class PartialBucketE2eTest : public StatsServiceConfigTest {};
+
+TEST_F(PartialBucketE2eTest, TestCountMetricWithoutSplit) {
+ sendConfig(MakeCountMetricConfig({true}));
int64_t start = getElapsedRealtimeNs(); // This is the start-time the metrics producers are
// initialized with.
service->mProcessor->OnLogEvent(CreateAppCrashEvent(start + 1, 100).get());
service->mProcessor->OnLogEvent(CreateAppCrashEvent(start + 2, 100).get());
- ConfigMetricsReport report = GetReports(service->mProcessor, start + 3);
+ ConfigMetricsReport report = getReports(service->mProcessor, start + 3);
// Expect no metrics since the bucket has not finished yet.
ASSERT_EQ(1, report.metrics_size());
ASSERT_EQ(0, report.metrics(0).count_metrics().data_size());
}
-TEST(PartialBucketE2eTest, TestCountMetricNoSplitOnNewApp) {
- shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
- SendConfig(service, MakeCountMetricConfig({true}));
+TEST_F(PartialBucketE2eTest, TestCountMetricNoSplitOnNewApp) {
+ sendConfig(MakeCountMetricConfig({true}));
int64_t start = getElapsedRealtimeNs(); // This is the start-time the metrics producers are
// initialized with.
@@ -151,14 +132,13 @@
// Goes into the second bucket.
service->mProcessor->OnLogEvent(CreateAppCrashEvent(start + 3, 100).get());
- ConfigMetricsReport report = GetReports(service->mProcessor, start + 4);
+ ConfigMetricsReport report = getReports(service->mProcessor, start + 4);
ASSERT_EQ(1, report.metrics_size());
ASSERT_EQ(0, report.metrics(0).count_metrics().data_size());
}
-TEST(PartialBucketE2eTest, TestCountMetricSplitOnUpgrade) {
- shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
- SendConfig(service, MakeCountMetricConfig({true}));
+TEST_F(PartialBucketE2eTest, TestCountMetricSplitOnUpgrade) {
+ sendConfig(MakeCountMetricConfig({true}));
int64_t start = getElapsedRealtimeNs(); // This is the start-time the metrics producers are
// initialized with.
service->mUidMap->updateMap(start, {1}, {1}, {String16("v1")}, {String16(kApp1.c_str())},
@@ -171,7 +151,7 @@
// Goes into the second bucket.
service->mProcessor->OnLogEvent(CreateAppCrashEvent(start + 3, 100).get());
- ConfigMetricsReport report = GetReports(service->mProcessor, start + 4);
+ ConfigMetricsReport report = getReports(service->mProcessor, start + 4);
backfillStartEndTimestamp(&report);
ASSERT_EQ(1, report.metrics_size());
@@ -190,9 +170,8 @@
EXPECT_EQ(1, report.metrics(0).count_metrics().data(0).bucket_info(0).count());
}
-TEST(PartialBucketE2eTest, TestCountMetricSplitOnRemoval) {
- shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
- SendConfig(service, MakeCountMetricConfig({true}));
+TEST_F(PartialBucketE2eTest, TestCountMetricSplitOnRemoval) {
+ sendConfig(MakeCountMetricConfig({true}));
int64_t start = getElapsedRealtimeNs(); // This is the start-time the metrics producers are
// initialized with.
service->mUidMap->updateMap(start, {1}, {1}, {String16("v1")}, {String16(kApp1.c_str())},
@@ -204,7 +183,7 @@
// Goes into the second bucket.
service->mProcessor->OnLogEvent(CreateAppCrashEvent(start + 3, 100).get());
- ConfigMetricsReport report = GetReports(service->mProcessor, start + 4);
+ ConfigMetricsReport report = getReports(service->mProcessor, start + 4);
backfillStartEndTimestamp(&report);
ASSERT_EQ(1, report.metrics_size());
@@ -223,9 +202,8 @@
EXPECT_EQ(1, report.metrics(0).count_metrics().data(0).bucket_info(0).count());
}
-TEST(PartialBucketE2eTest, TestCountMetricSplitOnBoot) {
- shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
- SendConfig(service, MakeCountMetricConfig(std::nullopt));
+TEST_F(PartialBucketE2eTest, TestCountMetricSplitOnBoot) {
+ sendConfig(MakeCountMetricConfig(std::nullopt));
int64_t start = getElapsedRealtimeNs(); // This is the start-time the metrics producers are
// initialized with.
@@ -236,7 +214,7 @@
// Goes into the second bucket.
service->mProcessor->OnLogEvent(CreateAppCrashEvent(start + 3 * NS_PER_SEC, 100).get());
- ConfigMetricsReport report = GetReports(service->mProcessor, start + 4 * NS_PER_SEC);
+ ConfigMetricsReport report = getReports(service->mProcessor, start + 4 * NS_PER_SEC);
backfillStartEndTimestamp(&report);
ASSERT_EQ(1, report.metrics_size());
@@ -252,10 +230,9 @@
EXPECT_EQ(1, report.metrics(0).count_metrics().data(0).bucket_info(0).count());
}
-TEST(PartialBucketE2eTest, TestCountMetricNoSplitOnUpgradeWhenDisabled) {
- shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
+TEST_F(PartialBucketE2eTest, TestCountMetricNoSplitOnUpgradeWhenDisabled) {
StatsdConfig config = MakeCountMetricConfig({false});
- SendConfig(service, config);
+ sendConfig(config);
int64_t start = getElapsedRealtimeNs(); // This is the start-time the metrics producers are
// initialized with.
service->mUidMap->updateMap(start, {1}, {1}, {String16("v1")}, {String16(kApp1.c_str())},
@@ -269,7 +246,7 @@
service->mProcessor->OnLogEvent(CreateAppCrashEvent(start + 3, 100).get());
ConfigMetricsReport report =
- GetReports(service->mProcessor, start + 4, /*include_current=*/true);
+ getReports(service->mProcessor, start + 4, /*include_current=*/true);
backfillStartEndTimestamp(&report);
ASSERT_EQ(1, report.metrics_size());
@@ -280,15 +257,14 @@
EXPECT_EQ(bucketInfo.count(), 2);
}
-TEST(PartialBucketE2eTest, TestValueMetricWithoutMinPartialBucket) {
- shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
+TEST_F(PartialBucketE2eTest, TestValueMetricWithoutMinPartialBucket) {
service->mPullerManager->RegisterPullAtomCallback(
/*uid=*/0, util::SUBSYSTEM_SLEEP_STATE, NS_PER_SEC, NS_PER_SEC * 10, {},
SharedRefBase::make<FakeSubsystemSleepCallback>());
// Partial buckets don't occur when app is first installed.
service->mUidMap->updateApp(1, String16(kApp1.c_str()), 1, 1, String16("v1"), String16(""),
/* certificateHash */ {});
- SendConfig(service, MakeValueMetricConfig(0));
+ sendConfig(MakeValueMetricConfig(0));
int64_t start = getElapsedRealtimeNs(); // This is the start-time the metrics producers are
// initialized with.
@@ -298,7 +274,7 @@
String16(""), /* certificateHash */ {});
ConfigMetricsReport report =
- GetReports(service->mProcessor, 5 * 60 * NS_PER_SEC + start + 100 * NS_PER_SEC);
+ getReports(service->mProcessor, 5 * 60 * NS_PER_SEC + start + 100 * NS_PER_SEC);
backfillStartEndTimestamp(&report);
ASSERT_EQ(1, report.metrics_size());
@@ -311,15 +287,14 @@
report.metrics(0).value_metrics().data(0).bucket_info(1).end_bucket_elapsed_nanos());
}
-TEST(PartialBucketE2eTest, TestValueMetricWithMinPartialBucket) {
- shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
+TEST_F(PartialBucketE2eTest, TestValueMetricWithMinPartialBucket) {
service->mPullerManager->RegisterPullAtomCallback(
/*uid=*/0, util::SUBSYSTEM_SLEEP_STATE, NS_PER_SEC, NS_PER_SEC * 10, {},
SharedRefBase::make<FakeSubsystemSleepCallback>());
// Partial buckets don't occur when app is first installed.
service->mUidMap->updateApp(1, String16(kApp1.c_str()), 1, 1, String16("v1"), String16(""),
/* certificateHash */ {});
- SendConfig(service, MakeValueMetricConfig(60 * NS_PER_SEC /* One minute */));
+ sendConfig(MakeValueMetricConfig(60 * NS_PER_SEC /* One minute */));
int64_t start = getElapsedRealtimeNs(); // This is the start-time the metrics producers are
// initialized with.
@@ -329,7 +304,7 @@
String16(""), /* certificateHash */ {});
ConfigMetricsReport report =
- GetReports(service->mProcessor, 5 * 60 * NS_PER_SEC + start + 100 * NS_PER_SEC);
+ getReports(service->mProcessor, 5 * 60 * NS_PER_SEC + start + 100 * NS_PER_SEC);
backfillStartEndTimestamp(&report);
ASSERT_EQ(1, report.metrics_size());
@@ -343,10 +318,9 @@
ASSERT_EQ(1, report.metrics(0).value_metrics().data(0).bucket_info_size());
}
-TEST(PartialBucketE2eTest, TestValueMetricOnBootWithoutMinPartialBucket) {
- shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
+TEST_F(PartialBucketE2eTest, TestValueMetricOnBootWithoutMinPartialBucket) {
// Initial pull will fail since puller is not registered.
- SendConfig(service, MakeValueMetricConfig(0));
+ sendConfig(MakeValueMetricConfig(0));
int64_t start = getElapsedRealtimeNs(); // This is the start-time the metrics producers are
// initialized with.
@@ -359,7 +333,7 @@
service->mProcessor->informPullAlarmFired(5 * 60 * NS_PER_SEC + start);
- ConfigMetricsReport report = GetReports(service->mProcessor, 5 * 60 * NS_PER_SEC + start + 100);
+ ConfigMetricsReport report = getReports(service->mProcessor, 5 * 60 * NS_PER_SEC + start + 100);
backfillStartEndTimestamp(&report);
// First bucket is dropped due to the initial pull failing
@@ -376,15 +350,14 @@
report.metrics(0).value_metrics().data(0).bucket_info(0).start_bucket_elapsed_nanos());
}
-TEST(PartialBucketE2eTest, TestGaugeMetricWithoutMinPartialBucket) {
- shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
+TEST_F(PartialBucketE2eTest, TestGaugeMetricWithoutMinPartialBucket) {
service->mPullerManager->RegisterPullAtomCallback(
/*uid=*/0, util::SUBSYSTEM_SLEEP_STATE, NS_PER_SEC, NS_PER_SEC * 10, {},
SharedRefBase::make<FakeSubsystemSleepCallback>());
// Partial buckets don't occur when app is first installed.
service->mUidMap->updateApp(1, String16(kApp1.c_str()), 1, 1, String16("v1"), String16(""),
/* certificateHash */ {});
- SendConfig(service, MakeGaugeMetricConfig(0));
+ sendConfig(MakeGaugeMetricConfig(0));
int64_t start = getElapsedRealtimeNs(); // This is the start-time the metrics producers are
// initialized with.
@@ -392,7 +365,7 @@
service->mUidMap->updateApp(5 * 60 * NS_PER_SEC + start + 2, String16(kApp1.c_str()), 1, 2,
String16("v2"), String16(""), /* certificateHash */ {});
- ConfigMetricsReport report = GetReports(service->mProcessor, 5 * 60 * NS_PER_SEC + start + 100);
+ ConfigMetricsReport report = getReports(service->mProcessor, 5 * 60 * NS_PER_SEC + start + 100);
backfillStartEndTimestamp(&report);
ASSERT_EQ(1, report.metrics_size());
ASSERT_EQ(0, report.metrics(0).gauge_metrics().skipped_size());
@@ -401,15 +374,14 @@
ASSERT_EQ(2, report.metrics(0).gauge_metrics().data(0).bucket_info_size());
}
-TEST(PartialBucketE2eTest, TestGaugeMetricWithMinPartialBucket) {
- shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
+TEST_F(PartialBucketE2eTest, TestGaugeMetricWithMinPartialBucket) {
// Partial buckets don't occur when app is first installed.
service->mUidMap->updateApp(1, String16(kApp1.c_str()), 1, 1, String16("v1"), String16(""),
/* certificateHash */ {});
service->mPullerManager->RegisterPullAtomCallback(
/*uid=*/0, util::SUBSYSTEM_SLEEP_STATE, NS_PER_SEC, NS_PER_SEC * 10, {},
SharedRefBase::make<FakeSubsystemSleepCallback>());
- SendConfig(service, MakeGaugeMetricConfig(60 * NS_PER_SEC /* One minute */));
+ sendConfig(MakeGaugeMetricConfig(60 * NS_PER_SEC /* One minute */));
int64_t start = getElapsedRealtimeNs(); // This is the start-time the metrics producers are
// initialized with.
@@ -419,7 +391,7 @@
String16(""), /* certificateHash */ {});
ConfigMetricsReport report =
- GetReports(service->mProcessor, 5 * 60 * NS_PER_SEC + start + 100 * NS_PER_SEC);
+ getReports(service->mProcessor, 5 * 60 * NS_PER_SEC + start + 100 * NS_PER_SEC);
backfillStartEndTimestamp(&report);
ASSERT_EQ(1, report.metrics_size());
ASSERT_EQ(1, report.metrics(0).gauge_metrics().skipped_size());
@@ -431,10 +403,9 @@
ASSERT_EQ(1, report.metrics(0).gauge_metrics().data(0).bucket_info_size());
}
-TEST(PartialBucketE2eTest, TestGaugeMetricOnBootWithoutMinPartialBucket) {
- shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
+TEST_F(PartialBucketE2eTest, TestGaugeMetricOnBootWithoutMinPartialBucket) {
// Initial pull will fail since puller hasn't been registered.
- SendConfig(service, MakeGaugeMetricConfig(0));
+ sendConfig(MakeGaugeMetricConfig(0));
int64_t start = getElapsedRealtimeNs(); // This is the start-time the metrics producers are
// initialized with.
@@ -447,7 +418,7 @@
service->mProcessor->informPullAlarmFired(5 * 60 * NS_PER_SEC + start);
- ConfigMetricsReport report = GetReports(service->mProcessor, 5 * 60 * NS_PER_SEC + start + 100);
+ ConfigMetricsReport report = getReports(service->mProcessor, 5 * 60 * NS_PER_SEC + start + 100);
backfillStartEndTimestamp(&report);
ASSERT_EQ(1, report.metrics_size());
@@ -461,10 +432,9 @@
report.metrics(0).gauge_metrics().data(0).bucket_info(0).start_bucket_elapsed_nanos());
}
-TEST(PartialBucketE2eTest, TestCountMetricNoSplitByDefault) {
- shared_ptr<StatsService> service = SharedRefBase::make<StatsService>(nullptr, nullptr);
+TEST_F(PartialBucketE2eTest, TestCountMetricNoSplitByDefault) {
StatsdConfig config = MakeCountMetricConfig({nullopt}); // Do not set the value in the metric.
- SendConfig(service, config);
+ sendConfig(config);
int64_t start = getElapsedRealtimeNs(); // This is the start-time the metrics producers are
// initialized with.
service->mUidMap->updateMap(start, {1}, {1}, {String16("v1")}, {String16(kApp1.c_str())},
@@ -478,7 +448,7 @@
service->mProcessor->OnLogEvent(CreateAppCrashEvent(start + 3, 100).get());
ConfigMetricsReport report =
- GetReports(service->mProcessor, start + 4, /*include_current=*/true);
+ getReports(service->mProcessor, start + 4, /*include_current=*/true);
backfillStartEndTimestamp(&report);
ASSERT_EQ(1, report.metrics_size());
diff --git a/statsd/tests/e2e/RestrictedConfig_e2e_test.cpp b/statsd/tests/e2e/RestrictedConfig_e2e_test.cpp
new file mode 100644
index 0000000..d02a8e5
--- /dev/null
+++ b/statsd/tests/e2e/RestrictedConfig_e2e_test.cpp
@@ -0,0 +1,479 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <android-modules-utils/sdk_level.h>
+#include <gtest/gtest.h>
+
+#include "flags/FlagProvider.h"
+#include "storage/StorageManager.h"
+#include "tests/statsd_test_util.h"
+
+namespace android {
+namespace os {
+namespace statsd {
+
+using android::modules::sdklevel::IsAtLeastU;
+
+#ifdef __ANDROID__
+
+namespace {
+const int32_t atomTag = 666;
+const string delegatePackageName = "com.test.restricted.metrics.package";
+const int32_t delegateUid = 10200;
+const string configPackageName = "com.test.config.package";
+int64_t metricId;
+int64_t anotherMetricId;
+
+StatsdConfig CreateConfigWithOneMetric() {
+ StatsdConfig config;
+ config.add_allowed_log_source("AID_ROOT");
+ AtomMatcher atomMatcher = CreateSimpleAtomMatcher("testmatcher", atomTag);
+ *config.add_atom_matcher() = atomMatcher;
+
+ EventMetric eventMetric = createEventMetric("EventMetric", atomMatcher.id(), nullopt);
+ metricId = eventMetric.id();
+ *config.add_event_metric() = eventMetric;
+ return config;
+}
+
+StatsdConfig CreateConfigWithTwoMetrics() {
+ StatsdConfig config;
+ config.add_allowed_log_source("AID_ROOT");
+ AtomMatcher atomMatcher = CreateSimpleAtomMatcher("testmatcher", atomTag);
+ *config.add_atom_matcher() = atomMatcher;
+
+ EventMetric eventMetric = createEventMetric("EventMetric", atomMatcher.id(), nullopt);
+ metricId = eventMetric.id();
+ *config.add_event_metric() = eventMetric;
+ EventMetric anotherEventMetric =
+ createEventMetric("AnotherEventMetric", atomMatcher.id(), nullopt);
+ anotherMetricId = anotherEventMetric.id();
+ *config.add_event_metric() = anotherEventMetric;
+ return config;
+}
+
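+// Creates three non-restricted log events at 10, 20, and 30 seconds after the config was added.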
+std::vector<std::unique_ptr<LogEvent>> CreateLogEvents(int64_t configAddedTimeNs) {
+ std::vector<std::unique_ptr<LogEvent>> events;
+ events.push_back(CreateNonRestrictedLogEvent(atomTag, configAddedTimeNs + 10 * NS_PER_SEC));
+ events.push_back(CreateNonRestrictedLogEvent(atomTag, configAddedTimeNs + 20 * NS_PER_SEC));
+ events.push_back(CreateNonRestrictedLogEvent(atomTag, configAddedTimeNs + 30 * NS_PER_SEC));
+ return events;
+}
+
+} // Anonymous namespace
+
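+// Fixture that extends StatsServiceConfigTest with a mock query callback and registers the
+// delegate and config packages in the UID map.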
+class RestrictedConfigE2ETest : public StatsServiceConfigTest {
+protected:
+ shared_ptr<MockStatsQueryCallback> mockStatsQueryCallback;
+ const ConfigKey configKey = ConfigKey(kCallingUid, kConfigKey);
+ vector<string> queryDataResult;
+ vector<string> columnNamesResult;
+ vector<int32_t> columnTypesResult;
+ int32_t rowCountResult = 0;
+ string error;
+
+ void SetUp() override {
+ if (!IsAtLeastU()) {
+ GTEST_SKIP();
+ }
+ StatsServiceConfigTest::SetUp();
+
+ mockStatsQueryCallback = SharedRefBase::make<StrictMock<MockStatsQueryCallback>>();
+ EXPECT_CALL(*mockStatsQueryCallback, sendResults(_, _, _, _))
+ .Times(AnyNumber())
+ .WillRepeatedly(Invoke(
+ [this](const vector<string>& queryData, const vector<string>& columnNames,
+ const vector<int32_t>& columnTypes, int32_t rowCount) {
+ queryDataResult = queryData;
+ columnNamesResult = columnNames;
+ columnTypesResult = columnTypes;
+ rowCountResult = rowCount;
+ error = "";
+ return Status::ok();
+ }));
+ EXPECT_CALL(*mockStatsQueryCallback, sendFailure(_))
+ .Times(AnyNumber())
+ .WillRepeatedly(Invoke([this](const string& err) {
+ error = err;
+ queryDataResult.clear();
+ columnNamesResult.clear();
+ columnTypesResult.clear();
+ rowCountResult = 0;
+ return Status::ok();
+ }));
+
+ int64_t startTimeNs = getElapsedRealtimeNs();
+ service->mUidMap->updateMap(
+ startTimeNs, {delegateUid, kCallingUid},
+ /*versionCode=*/{1, 1}, /*versionString=*/{String16("v2"), String16("v2")},
+ {String16(delegatePackageName.c_str()), String16(configPackageName.c_str())},
+ /*installer=*/{String16(), String16()}, /*certificateHash=*/{{}, {}});
+    }
+
+    void TearDown() override {
+ if (!IsAtLeastU()) {
+ GTEST_SKIP();
+ }
+ Mock::VerifyAndClear(mockStatsQueryCallback.get());
+ queryDataResult.clear();
+ columnNamesResult.clear();
+ columnTypesResult.clear();
+ rowCountResult = 0;
+ error = "";
+ StatsServiceConfigTest::TearDown();
+ FlagProvider::getInstance().resetOverrides();
+ dbutils::deleteDb(configKey);
+ }
+
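+    // Queries the restricted DB table for the given metric and checks that it holds the expected
+    // number of rows, or that the table no longer exists when shouldExist is false.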
+ void verifyRestrictedData(int32_t expectedNumOfMetrics, int64_t metricIdToVerify = metricId,
+ bool shouldExist = true) {
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(metricIdToVerify);
+ string err;
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ if (shouldExist) {
+ EXPECT_TRUE(
+ dbutils::query(configKey, query.str(), rows, columnTypes, columnNames, err));
+ EXPECT_EQ(rows.size(), expectedNumOfMetrics);
+ } else {
+ // Expect that table is deleted.
+ EXPECT_FALSE(
+ dbutils::query(configKey, query.str(), rows, columnTypes, columnNames, err));
+ }
+ }
+};
+
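+// A restricted config should not return any data through the getData report path.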
+TEST_F(RestrictedConfigE2ETest, RestrictedConfigNoReport) {
+ StatsdConfig config = CreateConfigWithOneMetric();
+ config.set_restricted_metrics_delegate_package_name("delegate");
+ sendConfig(config);
+ int64_t configAddedTimeNs = getElapsedRealtimeNs();
+
+ for (auto& event : CreateLogEvents(configAddedTimeNs)) {
+ service->OnLogEvent(event.get());
+ }
+
+ vector<uint8_t> output;
+ service->getData(kConfigKey, kCallingUid, &output);
+
+ EXPECT_TRUE(output.empty());
+}
+
+TEST_F(RestrictedConfigE2ETest, NonRestrictedConfigGetReport) {
+ StatsdConfig config = CreateConfigWithOneMetric();
+ sendConfig(config);
+ int64_t configAddedTimeNs = getElapsedRealtimeNs();
+
+ for (auto& event : CreateLogEvents(configAddedTimeNs)) {
+ service->OnLogEvent(event.get());
+ }
+
+ ConfigMetricsReport report = getReports(service->mProcessor, /*timestamp=*/10);
+ EXPECT_EQ(report.metrics_size(), 1);
+}
+
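+// On shutdown, restricted data is flushed to the restricted DB rather than to report storage.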
+TEST_F(RestrictedConfigE2ETest, RestrictedShutdownFlushToRestrictedDB) {
+ StatsdConfig config = CreateConfigWithOneMetric();
+ config.set_restricted_metrics_delegate_package_name("delegate");
+ sendConfig(config);
+ int64_t configAddedTimeNs = getElapsedRealtimeNs();
+ std::vector<std::unique_ptr<LogEvent>> logEvents = CreateLogEvents(configAddedTimeNs);
+ for (const auto& e : logEvents) {
+ service->OnLogEvent(e.get());
+ }
+
+ service->informDeviceShutdown();
+
+ // Should not be written to non-restricted storage.
+ EXPECT_FALSE(StorageManager::hasConfigMetricsReport(ConfigKey(kCallingUid, kConfigKey)));
+ verifyRestrictedData(logEvents.size());
+}
+
+TEST_F(RestrictedConfigE2ETest, NonRestrictedOnShutdownWriteDataToDisk) {
+ StatsdConfig config = CreateConfigWithOneMetric();
+ sendConfig(config);
+ int64_t configAddedTimeNs = getElapsedRealtimeNs();
+ for (auto& event : CreateLogEvents(configAddedTimeNs)) {
+ service->OnLogEvent(event.get());
+ }
+
+ service->informDeviceShutdown();
+
+ EXPECT_TRUE(StorageManager::hasConfigMetricsReport(ConfigKey(kCallingUid, kConfigKey)));
+}
+
+TEST_F(RestrictedConfigE2ETest, RestrictedConfigOnTerminateFlushToRestrictedDB) {
+ StatsdConfig config = CreateConfigWithOneMetric();
+ config.set_restricted_metrics_delegate_package_name("delegate");
+ sendConfig(config);
+ int64_t configAddedTimeNs = getElapsedRealtimeNs();
+ std::vector<std::unique_ptr<LogEvent>> logEvents = CreateLogEvents(configAddedTimeNs);
+ for (auto& event : logEvents) {
+ service->OnLogEvent(event.get());
+ }
+
+ service->Terminate();
+
+ EXPECT_FALSE(StorageManager::hasConfigMetricsReport(ConfigKey(kCallingUid, kConfigKey)));
+ verifyRestrictedData(logEvents.size());
+}
+
+TEST_F(RestrictedConfigE2ETest, NonRestrictedConfigOnTerminateWriteDataToDisk) {
+ StatsdConfig config = CreateConfigWithOneMetric();
+ sendConfig(config);
+ int64_t configAddedTimeNs = getElapsedRealtimeNs();
+ for (auto& event : CreateLogEvents(configAddedTimeNs)) {
+ service->OnLogEvent(event.get());
+ }
+
+ service->Terminate();
+
+ EXPECT_TRUE(StorageManager::hasConfigMetricsReport(ConfigKey(kCallingUid, kConfigKey)));
+}
+
+TEST_F(RestrictedConfigE2ETest, RestrictedConfigOnUpdateWithMetricRemoval) {
+ StatsdConfig complexConfig = CreateConfigWithTwoMetrics();
+ complexConfig.set_restricted_metrics_delegate_package_name(delegatePackageName);
+ sendConfig(complexConfig);
+ int64_t configAddedTimeNs = getElapsedRealtimeNs();
+ std::vector<std::unique_ptr<LogEvent>> logEvents = CreateLogEvents(configAddedTimeNs);
+ for (auto& event : logEvents) {
+ service->OnLogEvent(event.get());
+ }
+
+ // Use query API to make sure data is flushed.
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(metricId);
+ service->querySql(query.str(), /*minSqlClientVersion=*/0,
+ /*policyConfig=*/{}, mockStatsQueryCallback,
+ /*configKey=*/kConfigKey, /*configPackage=*/configPackageName,
+ /*callingUid=*/delegateUid);
+ EXPECT_EQ(error, "");
+ EXPECT_EQ(rowCountResult, logEvents.size());
+ verifyRestrictedData(logEvents.size(), anotherMetricId, true);
+
+ // Update config to have only one metric
+ StatsdConfig config = CreateConfigWithOneMetric();
+ config.set_restricted_metrics_delegate_package_name(delegatePackageName);
+ sendConfig(config);
+
+    // Verify the removed metric's data is deleted while the remaining metric's data is retained.
+ verifyRestrictedData(logEvents.size(), metricId, true);
+ verifyRestrictedData(logEvents.size(), anotherMetricId, false);
+}
+
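+// Walks a config through add, update, restriction toggles, and removal, verifying that the
+// registered PendingIntent receives the expected lists of restricted metric IDs.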
+TEST_F(RestrictedConfigE2ETest, TestSendRestrictedMetricsChangedBroadcast) {
+ vector<int64_t> receivedMetricIds;
+ int receiveCount = 0;
+ shared_ptr<MockPendingIntentRef> pir = SharedRefBase::make<StrictMock<MockPendingIntentRef>>();
+ EXPECT_CALL(*pir, sendRestrictedMetricsChangedBroadcast(_))
+ .Times(7)
+ .WillRepeatedly(Invoke([&receivedMetricIds, &receiveCount](const vector<int64_t>& ids) {
+ receiveCount++;
+ receivedMetricIds = ids;
+ return Status::ok();
+ }));
+
+ // Set the operation. No configs present so empty list is returned.
+ vector<int64_t> returnedMetricIds;
+ service->setRestrictedMetricsChangedOperation(kConfigKey, configPackageName, pir, delegateUid,
+ &returnedMetricIds);
+ EXPECT_EQ(receiveCount, 0);
+ EXPECT_THAT(returnedMetricIds, IsEmpty());
+
+    // Add restricted config. Should receive one metric.
+ StatsdConfig config = CreateConfigWithOneMetric();
+ config.set_restricted_metrics_delegate_package_name(delegatePackageName);
+ sendConfig(config);
+ EXPECT_EQ(receiveCount, 1);
+ EXPECT_THAT(receivedMetricIds, UnorderedElementsAre(metricId));
+
+ // Config update, should receive two metrics.
+ config = CreateConfigWithTwoMetrics();
+ config.set_restricted_metrics_delegate_package_name(delegatePackageName);
+ sendConfig(config);
+ EXPECT_EQ(receiveCount, 2);
+ EXPECT_THAT(receivedMetricIds, UnorderedElementsAre(metricId, anotherMetricId));
+
+ // Make config unrestricted. Should receive empty list.
+ config.clear_restricted_metrics_delegate_package_name();
+ sendConfig(config);
+ EXPECT_EQ(receiveCount, 3);
+ EXPECT_THAT(receivedMetricIds, IsEmpty());
+
+ // Update the unrestricted config. Nothing should be sent.
+ config = CreateConfigWithOneMetric();
+ sendConfig(config);
+
+ // Update config and make it restricted. Should receive one metric.
+ config.set_restricted_metrics_delegate_package_name(delegatePackageName);
+ sendConfig(config);
+ EXPECT_EQ(receiveCount, 4);
+ EXPECT_THAT(receivedMetricIds, UnorderedElementsAre(metricId));
+
+ // Send an invalid config. Should receive empty list.
+ config.clear_allowed_log_source();
+ sendConfig(config);
+ EXPECT_EQ(receiveCount, 5);
+ EXPECT_THAT(receivedMetricIds, IsEmpty());
+
+ service->removeRestrictedMetricsChangedOperation(kConfigKey, configPackageName, delegateUid);
+
+ // Nothing should be sent since the operation is removed.
+ config = CreateConfigWithTwoMetrics();
+ config.set_restricted_metrics_delegate_package_name(delegatePackageName);
+ sendConfig(config);
+
+ // Set the operation. Two metrics should be returned.
+ returnedMetricIds.clear();
+ service->setRestrictedMetricsChangedOperation(kConfigKey, configPackageName, pir, delegateUid,
+ &returnedMetricIds);
+ EXPECT_THAT(returnedMetricIds, UnorderedElementsAre(metricId, anotherMetricId));
+ EXPECT_EQ(receiveCount, 5);
+
+    // Config update, should receive one metric.
+ config = CreateConfigWithOneMetric();
+ config.set_restricted_metrics_delegate_package_name(delegatePackageName);
+ sendConfig(config);
+ EXPECT_EQ(receiveCount, 6);
+ EXPECT_THAT(receivedMetricIds, UnorderedElementsAre(metricId));
+
+    // Remove the config and verify an empty list is received.
+ service->removeConfiguration(kConfigKey, kCallingUid);
+ EXPECT_EQ(receiveCount, 7);
+ EXPECT_THAT(receivedMetricIds, IsEmpty());
+
+ // Cleanup.
+ service->removeRestrictedMetricsChangedOperation(kConfigKey, configPackageName, delegateUid);
+}
+
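+// Listeners registered for the matching delegate package receive broadcasts; a listener whose
+// UID does not belong to the delegate package receives none.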
+TEST_F(RestrictedConfigE2ETest, TestSendRestrictedMetricsChangedBroadcastMultipleListeners) {
+ const string configPackageName2 = "com.test.config.package2";
+ const int32_t delegateUid2 = delegateUid + 1, delegateUid3 = delegateUid + 2;
+ service->informOnePackage(configPackageName2, kCallingUid, 0, "", "", {});
+ service->informOnePackage(delegatePackageName, delegateUid2, 0, "", "", {});
+ service->informOnePackage("not.a.good.package", delegateUid3, 0, "", "", {});
+
+ vector<int64_t> receivedMetricIds;
+ int receiveCount = 0;
+ shared_ptr<MockPendingIntentRef> pir = SharedRefBase::make<StrictMock<MockPendingIntentRef>>();
+ EXPECT_CALL(*pir, sendRestrictedMetricsChangedBroadcast(_))
+ .Times(2)
+ .WillRepeatedly(Invoke([&receivedMetricIds, &receiveCount](const vector<int64_t>& ids) {
+ receiveCount++;
+ receivedMetricIds = ids;
+ return Status::ok();
+ }));
+
+ int receiveCount2 = 0;
+ vector<int64_t> receivedMetricIds2;
+ shared_ptr<MockPendingIntentRef> pir2 = SharedRefBase::make<StrictMock<MockPendingIntentRef>>();
+ EXPECT_CALL(*pir2, sendRestrictedMetricsChangedBroadcast(_))
+ .Times(2)
+ .WillRepeatedly(
+ Invoke([&receivedMetricIds2, &receiveCount2](const vector<int64_t>& ids) {
+ receiveCount2++;
+ receivedMetricIds2 = ids;
+ return Status::ok();
+ }));
+
+ // This one should never be called.
+ shared_ptr<MockPendingIntentRef> pir3 = SharedRefBase::make<StrictMock<MockPendingIntentRef>>();
+ EXPECT_CALL(*pir3, sendRestrictedMetricsChangedBroadcast(_)).Times(0);
+
+ // Set the operations. No configs present so empty list is returned.
+ vector<int64_t> returnedMetricIds;
+ service->setRestrictedMetricsChangedOperation(kConfigKey, configPackageName, pir, delegateUid,
+ &returnedMetricIds);
+ EXPECT_EQ(receiveCount, 0);
+ EXPECT_THAT(returnedMetricIds, IsEmpty());
+
+ vector<int64_t> returnedMetricIds2;
+ service->setRestrictedMetricsChangedOperation(kConfigKey, configPackageName2, pir2,
+ delegateUid2, &returnedMetricIds2);
+ EXPECT_EQ(receiveCount2, 0);
+ EXPECT_THAT(returnedMetricIds2, IsEmpty());
+
+    // Represents a package that is listening for changes but does not match the restricted
+    // delegate package in the config.
+ vector<int64_t> returnedMetricIds3;
+ service->setRestrictedMetricsChangedOperation(kConfigKey, configPackageName, pir3, delegateUid3,
+ &returnedMetricIds3);
+ EXPECT_THAT(returnedMetricIds3, IsEmpty());
+
+    // Add restricted config. Both pir and pir2 should receive one metric.
+ StatsdConfig config = CreateConfigWithOneMetric();
+ config.set_restricted_metrics_delegate_package_name(delegatePackageName);
+ sendConfig(config);
+ EXPECT_EQ(receiveCount, 1);
+ EXPECT_THAT(receivedMetricIds, UnorderedElementsAre(metricId));
+ EXPECT_EQ(receiveCount2, 1);
+ EXPECT_THAT(receivedMetricIds2, UnorderedElementsAre(metricId));
+
+    // Config update. Both pir and pir2 should receive two metrics.
+ config = CreateConfigWithTwoMetrics();
+ config.set_restricted_metrics_delegate_package_name(delegatePackageName);
+ sendConfig(config);
+ EXPECT_EQ(receiveCount, 2);
+ EXPECT_THAT(receivedMetricIds, UnorderedElementsAre(metricId, anotherMetricId));
+ EXPECT_EQ(receiveCount2, 2);
+ EXPECT_THAT(receivedMetricIds2, UnorderedElementsAre(metricId, anotherMetricId));
+
+ // Cleanup.
+ service->removeRestrictedMetricsChangedOperation(kConfigKey, configPackageName, delegateUid);
+ service->removeRestrictedMetricsChangedOperation(kConfigKey, configPackageName2, delegateUid2);
+ service->removeRestrictedMetricsChangedOperation(kConfigKey, configPackageName, delegateUid3);
+}
+
+TEST_F(RestrictedConfigE2ETest, TestSendRestrictedMetricsChangedBroadcastMultipleMatchedConfigs) {
+ const int32_t callingUid2 = kCallingUid + 1;
+ service->informOnePackage(configPackageName, callingUid2, 0, "", "", {});
+
+ // Add restricted config.
+ StatsdConfig config = CreateConfigWithOneMetric();
+ config.set_restricted_metrics_delegate_package_name(delegatePackageName);
+ sendConfig(config);
+
+ // Add a second config.
+    const int64_t metricId2 = 42;
+    config.mutable_event_metric(0)->set_id(metricId2);
+ string str;
+ config.SerializeToString(&str);
+ std::vector<uint8_t> configAsVec(str.begin(), str.end());
+ service->addConfiguration(kConfigKey, configAsVec, callingUid2);
+
+    // Set the operation. Multiple configs match, so the union of their metric IDs is returned.
+ shared_ptr<MockPendingIntentRef> pir = SharedRefBase::make<StrictMock<MockPendingIntentRef>>();
+ vector<int64_t> returnedMetricIds;
+ service->setRestrictedMetricsChangedOperation(kConfigKey, configPackageName, pir, delegateUid,
+ &returnedMetricIds);
+ EXPECT_THAT(returnedMetricIds, UnorderedElementsAre(metricId, metricId2));
+
+ // Cleanup.
+ service->removeRestrictedMetricsChangedOperation(kConfigKey, configPackageName, delegateUid);
+
+ ConfigKey cfgKey(callingUid2, kConfigKey);
+ service->removeConfiguration(kConfigKey, callingUid2);
+ service->mProcessor->onDumpReport(cfgKey, getElapsedRealtimeNs(),
+                                      false /* include_current_bucket */, true /* erase_data */,
+ ADB_DUMP, NO_TIME_CONSTRAINTS, nullptr);
+}
+#else
+GTEST_LOG_(INFO) << "This test does nothing.\n";
+#endif
+
+} // namespace statsd
+} // namespace os
+} // namespace android
\ No newline at end of file
diff --git a/statsd/tests/e2e/RestrictedEventMetric_e2e_test.cpp b/statsd/tests/e2e/RestrictedEventMetric_e2e_test.cpp
new file mode 100644
index 0000000..a45a88e
--- /dev/null
+++ b/statsd/tests/e2e/RestrictedEventMetric_e2e_test.cpp
@@ -0,0 +1,1193 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <android-modules-utils/sdk_level.h>
+#include <gtest/gtest.h>
+
+#include <vector>
+
+#include "android-base/stringprintf.h"
+#include "flags/FlagProvider.h"
+#include "src/StatsLogProcessor.h"
+#include "src/state/StateTracker.h"
+#include "src/stats_log_util.h"
+#include "src/storage/StorageManager.h"
+#include "src/utils/RestrictedPolicyManager.h"
+#include "stats_annotations.h"
+#include "tests/statsd_test_util.h"
+
+namespace android {
+namespace os {
+namespace statsd {
+
+using android::modules::sdklevel::IsAtLeastU;
+using base::StringPrintf;
+
+#ifdef __ANDROID__
+
+namespace {
+const int64_t oneMonthLater = getWallClockNs() + 31 * 24 * 3600 * NS_PER_SEC;
+const int64_t configId = 12345;
+const string delegate_package_name = "com.test.restricted.metrics.package";
+const int32_t delegate_uid = 1005;
+const string config_package_name = "com.test.config.package";
+const int32_t config_app_uid = 123;
+const ConfigKey configKey(config_app_uid, configId);
+const int64_t eightDaysAgo = getWallClockNs() - 8 * 24 * 3600 * NS_PER_SEC;
+const int64_t oneDayAgo = getWallClockNs() - 1 * 24 * 3600 * NS_PER_SEC;
+} // anonymous namespace
+
+// Fixture that builds a config with one restricted EventMetric and a StatsLogProcessor wired to
+// a mock query callback.
+class RestrictedEventMetricE2eTest : public ::testing::Test {
+protected:
+ shared_ptr<MockStatsQueryCallback> mockStatsQueryCallback;
+ vector<string> queryDataResult;
+ vector<string> columnNamesResult;
+ vector<int32_t> columnTypesResult;
+ int32_t rowCountResult = 0;
+ string error;
+ sp<UidMap> uidMap;
+ sp<StatsLogProcessor> processor;
+ int32_t atomTag;
+ int64_t restrictedMetricId;
+ int64_t configAddedTimeNs;
+ StatsdConfig config;
+
+private:
+ void SetUp() override {
+ if (!IsAtLeastU()) {
+ GTEST_SKIP();
+ }
+
+ mockStatsQueryCallback = SharedRefBase::make<StrictMock<MockStatsQueryCallback>>();
+ EXPECT_CALL(*mockStatsQueryCallback, sendResults(_, _, _, _))
+ .Times(AnyNumber())
+ .WillRepeatedly(Invoke(
+ [this](const vector<string>& queryData, const vector<string>& columnNames,
+ const vector<int32_t>& columnTypes, int32_t rowCount) {
+ queryDataResult = queryData;
+ columnNamesResult = columnNames;
+ columnTypesResult = columnTypes;
+ rowCountResult = rowCount;
+ error = "";
+ return Status::ok();
+ }));
+ EXPECT_CALL(*mockStatsQueryCallback, sendFailure(_))
+ .Times(AnyNumber())
+ .WillRepeatedly(Invoke([this](const string& err) {
+ error = err;
+ queryDataResult.clear();
+ columnNamesResult.clear();
+ columnTypesResult.clear();
+ rowCountResult = 0;
+ return Status::ok();
+ }));
+
+ config.add_allowed_log_source("AID_ROOT"); // LogEvent defaults to UID of root.
+
+ atomTag = 999;
+ AtomMatcher restrictedAtomMatcher = CreateSimpleAtomMatcher("restricted_matcher", atomTag);
+ *config.add_atom_matcher() = restrictedAtomMatcher;
+
+ EventMetric restrictedEventMetric =
+ createEventMetric("RestrictedMetricLogged", restrictedAtomMatcher.id(), nullopt);
+ *config.add_event_metric() = restrictedEventMetric;
+ restrictedMetricId = restrictedEventMetric.id();
+
+ config.set_restricted_metrics_delegate_package_name(delegate_package_name.c_str());
+
+ const int64_t baseTimeNs = 0; // 0:00
+ configAddedTimeNs = baseTimeNs + 1 * NS_PER_SEC; // 0:01
+
+ uidMap = new UidMap();
+ uidMap->updateApp(configAddedTimeNs, String16(delegate_package_name.c_str()),
+ /*uid=*/delegate_uid, /*versionCode=*/1,
+ /*versionString=*/String16("v2"),
+ /*installer=*/String16(""), /*certificateHash=*/{});
+ uidMap->updateApp(configAddedTimeNs + 1, String16(config_package_name.c_str()),
+ /*uid=*/config_app_uid, /*versionCode=*/1,
+ /*versionString=*/String16("v2"),
+ /*installer=*/String16(""), /*certificateHash=*/{});
+
+ processor = CreateStatsLogProcessor(baseTimeNs, configAddedTimeNs, config, configKey,
+ /*puller=*/nullptr, /*atomTag=*/0, uidMap);
+ }
+
+ void TearDown() override {
+ Mock::VerifyAndClear(mockStatsQueryCallback.get());
+ queryDataResult.clear();
+ columnNamesResult.clear();
+ columnTypesResult.clear();
+ rowCountResult = 0;
+ error = "";
+ dbutils::deleteDb(configKey);
+ dbutils::deleteDb(ConfigKey(config_app_uid + 1, configId));
+ FlagProvider::getInstance().resetOverrides();
+ }
+};
+
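+// Three logged restricted events should come back as three rows from the SQL query API.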
+TEST_F(RestrictedEventMetricE2eTest, TestQueryThreeEvents) {
+ std::vector<std::unique_ptr<LogEvent>> events;
+
+ events.push_back(CreateRestrictedLogEvent(atomTag, configAddedTimeNs + 100));
+ events.push_back(CreateRestrictedLogEvent(atomTag, configAddedTimeNs + 200));
+ events.push_back(CreateRestrictedLogEvent(atomTag, configAddedTimeNs + 300));
+
+ // Send log events to StatsLogProcessor.
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get());
+ }
+
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ processor->querySql(query.str(), /*minSqlClientVersion=*/0,
+ /*policyConfig=*/{}, mockStatsQueryCallback,
+ /*configKey=*/configId, /*configPackage=*/config_package_name,
+ /*callingUid=*/delegate_uid);
+
+ EXPECT_EQ(rowCountResult, 3);
+ EXPECT_THAT(queryDataResult, ElementsAre(to_string(atomTag), to_string(configAddedTimeNs + 100),
+ _, // wallClockNs
+ _, // field_1
+ to_string(atomTag), to_string(configAddedTimeNs + 200),
+ _, // wallClockNs
+ _, // field_1
+ to_string(atomTag), to_string(configAddedTimeNs + 300),
+ _, // wallClockNs
+ _ // field_1
+ ));
+
+ EXPECT_THAT(columnNamesResult,
+ ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs", "field_1"));
+
+ EXPECT_THAT(columnTypesResult,
+ ElementsAre(SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER));
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestInvalidSchemaIncreasingFieldCount) {
+ std::vector<std::unique_ptr<LogEvent>> events;
+
+ AStatsEvent* statsEvent = AStatsEvent_obtain();
+ AStatsEvent_setAtomId(statsEvent, atomTag);
+ AStatsEvent_addInt32Annotation(statsEvent, ASTATSLOG_ANNOTATION_ID_RESTRICTION_CATEGORY,
+ ASTATSLOG_RESTRICTION_CATEGORY_DIAGNOSTIC);
+ AStatsEvent_overwriteTimestamp(statsEvent, configAddedTimeNs + 200);
+ // This event has two extra fields
+ AStatsEvent_writeString(statsEvent, "111");
+ AStatsEvent_writeInt32(statsEvent, 11);
+ AStatsEvent_writeFloat(statsEvent, 11.0);
+ std::unique_ptr<LogEvent> logEvent = std::make_unique<LogEvent>(/*uid=*/0, /*pid=*/0);
+ parseStatsEventToLogEvent(statsEvent, logEvent.get());
+
+ events.push_back(CreateRestrictedLogEvent(atomTag, configAddedTimeNs + 100));
+ events.push_back(std::move(logEvent));
+
+ // Send log events to StatsLogProcessor.
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get());
+ processor->WriteDataToDisk(DEVICE_SHUTDOWN, FAST,
+ event->GetElapsedTimestampNs() + 20 * NS_PER_SEC,
+ getWallClockNs());
+ }
+
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ processor->querySql(query.str(), /*minSqlClientVersion=*/0,
+ /*policyConfig=*/{}, mockStatsQueryCallback,
+ /*configKey=*/configId, /*configPackage=*/config_package_name,
+ /*callingUid=*/delegate_uid);
+
+ EXPECT_EQ(rowCountResult, 1);
+ // Event 2 rejected.
+ EXPECT_THAT(queryDataResult, ElementsAre(to_string(atomTag), to_string(configAddedTimeNs + 100),
+ _, // wallClockNs
+ _ // field_1
+ ));
+
+ EXPECT_THAT(columnNamesResult,
+ ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs", "field_1"));
+
+ EXPECT_THAT(columnTypesResult,
+ ElementsAre(SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER));
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestInvalidSchemaDecreasingFieldCount) {
+ std::vector<std::unique_ptr<LogEvent>> events;
+
+ AStatsEvent* statsEvent = AStatsEvent_obtain();
+ AStatsEvent_setAtomId(statsEvent, atomTag);
+ AStatsEvent_addInt32Annotation(statsEvent, ASTATSLOG_ANNOTATION_ID_RESTRICTION_CATEGORY,
+ ASTATSLOG_RESTRICTION_CATEGORY_DIAGNOSTIC);
+ AStatsEvent_overwriteTimestamp(statsEvent, configAddedTimeNs + 100);
+ // This event has one extra field.
+ AStatsEvent_writeString(statsEvent, "111");
+ AStatsEvent_writeInt32(statsEvent, 11);
+ std::unique_ptr<LogEvent> logEvent = std::make_unique<LogEvent>(/*uid=*/0, /*pid=*/0);
+ parseStatsEventToLogEvent(statsEvent, logEvent.get());
+
+ events.push_back(std::move(logEvent));
+ events.push_back(CreateRestrictedLogEvent(atomTag, configAddedTimeNs + 200));
+
+ // Send log events to StatsLogProcessor.
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get());
+ processor->WriteDataToDisk(DEVICE_SHUTDOWN, FAST,
+ event->GetElapsedTimestampNs() + 20 * NS_PER_SEC,
+ getWallClockNs());
+ }
+
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ processor->querySql(query.str(), /*minSqlClientVersion=*/0,
+ /*policyConfig=*/{}, mockStatsQueryCallback,
+ /*configKey=*/configId, /*configPackage=*/config_package_name,
+ /*callingUid=*/delegate_uid);
+
+ EXPECT_EQ(rowCountResult, 1);
+    // Event 2 rejected.
+ EXPECT_THAT(queryDataResult, ElementsAre(to_string(atomTag), to_string(configAddedTimeNs + 100),
+ _, // wallClockNs
+ "111", // field_1
+ to_string(11) // field_2
+ ));
+
+ EXPECT_THAT(columnNamesResult, ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs",
+ "field_1", "field_2"));
+
+ EXPECT_THAT(columnTypesResult, ElementsAre(SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER,
+ SQLITE_TEXT, SQLITE_INTEGER));
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestInvalidSchemaDifferentFieldType) {
+ std::vector<std::unique_ptr<LogEvent>> events;
+
+ AStatsEvent* statsEvent = AStatsEvent_obtain();
+ AStatsEvent_setAtomId(statsEvent, atomTag);
+ AStatsEvent_addInt32Annotation(statsEvent, ASTATSLOG_ANNOTATION_ID_RESTRICTION_CATEGORY,
+ ASTATSLOG_RESTRICTION_CATEGORY_DIAGNOSTIC);
+ AStatsEvent_overwriteTimestamp(statsEvent, configAddedTimeNs + 200);
+ // This event has a string instead of an int field
+ AStatsEvent_writeString(statsEvent, "test_string");
+ std::unique_ptr<LogEvent> logEvent = std::make_unique<LogEvent>(/*uid=*/0, /*pid=*/0);
+ parseStatsEventToLogEvent(statsEvent, logEvent.get());
+
+ events.push_back(CreateRestrictedLogEvent(atomTag, configAddedTimeNs + 100));
+ events.push_back(std::move(logEvent));
+
+ // Send log events to StatsLogProcessor.
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get());
+ processor->WriteDataToDisk(DEVICE_SHUTDOWN, FAST,
+ event->GetElapsedTimestampNs() + 20 * NS_PER_SEC,
+ getWallClockNs());
+ }
+
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ processor->querySql(query.str(), /*minSqlClientVersion=*/0,
+ /*policyConfig=*/{}, mockStatsQueryCallback,
+ /*configKey=*/configId, /*configPackage=*/config_package_name,
+ /*callingUid=*/delegate_uid);
+
+ EXPECT_EQ(rowCountResult, 1);
+ // Event 2 rejected.
+ EXPECT_THAT(queryDataResult, ElementsAre(to_string(atomTag), to_string(configAddedTimeNs + 100),
+ _, // wallClockNs
+ _ // field_1
+ ));
+ EXPECT_THAT(columnNamesResult,
+ ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs", "field_1"));
+ EXPECT_THAT(columnTypesResult,
+ ElementsAre(SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER));
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestNewMetricSchemaAcrossReboot) {
+ int64_t originalEventElapsedTime = configAddedTimeNs + 100;
+ std::unique_ptr<LogEvent> event1 = CreateRestrictedLogEvent(atomTag, originalEventElapsedTime);
+ processor->OnLogEvent(event1.get());
+
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ processor->querySql(query.str(), /*minSqlClientVersion=*/0,
+ /*policyConfig=*/{}, mockStatsQueryCallback,
+ /*configKey=*/configId, /*configPackage=*/config_package_name,
+ /*callingUid=*/delegate_uid);
+ EXPECT_EQ(rowCountResult, 1);
+ EXPECT_THAT(queryDataResult,
+ ElementsAre(to_string(atomTag), to_string(originalEventElapsedTime),
+ _, // wallTimestampNs
+ _ // field_1
+ ));
+ EXPECT_THAT(columnNamesResult,
+ ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs", "field_1"));
+ EXPECT_THAT(columnTypesResult,
+ ElementsAre(SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER));
+
+ // Create a new processor to simulate a reboot
+ auto processor2 =
+ CreateStatsLogProcessor(/*baseTimeNs=*/0, configAddedTimeNs, config, configKey,
+ /*puller=*/nullptr, /*atomTag=*/0, uidMap);
+
+ // Create a restricted event with one extra field.
+ AStatsEvent* statsEvent = AStatsEvent_obtain();
+ AStatsEvent_setAtomId(statsEvent, atomTag);
+ AStatsEvent_addInt32Annotation(statsEvent, ASTATSLOG_ANNOTATION_ID_RESTRICTION_CATEGORY,
+ ASTATSLOG_RESTRICTION_CATEGORY_DIAGNOSTIC);
+ AStatsEvent_overwriteTimestamp(statsEvent, originalEventElapsedTime + 100);
+ // This event has one extra field.
+ AStatsEvent_writeString(statsEvent, "111");
+ AStatsEvent_writeInt32(statsEvent, 11);
+ std::unique_ptr<LogEvent> event2 = std::make_unique<LogEvent>(/*uid=*/0, /*pid=*/0);
+ parseStatsEventToLogEvent(statsEvent, event2.get());
+ processor2->OnLogEvent(event2.get());
+
+ processor2->querySql(query.str(), /*minSqlClientVersion=*/0,
+ /*policyConfig=*/{}, mockStatsQueryCallback,
+ /*configKey=*/configId, /*configPackage=*/config_package_name,
+ /*callingUid=*/delegate_uid);
+ EXPECT_EQ(rowCountResult, 1);
+ EXPECT_THAT(queryDataResult,
+ ElementsAre(to_string(atomTag), to_string(originalEventElapsedTime + 100),
+ _, // wallTimestampNs
+ to_string(111), // field_1
+ to_string(11) // field_2
+ ));
+ EXPECT_THAT(columnNamesResult, ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs",
+ "field_1", "field_2"));
+ EXPECT_THAT(columnTypesResult, ElementsAre(SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER,
+ SQLITE_TEXT, SQLITE_INTEGER));
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestOneEventMultipleUids) {
+ uidMap->updateApp(configAddedTimeNs, String16(delegate_package_name.c_str()),
+ /*uid=*/delegate_uid + 1, /*versionCode=*/1,
+ /*versionString=*/String16("v2"),
+ /*installer=*/String16(""), /*certificateHash=*/{});
+ uidMap->updateApp(configAddedTimeNs + 1, String16(config_package_name.c_str()),
+ /*uid=*/config_app_uid + 1, /*versionCode=*/1,
+ /*versionString=*/String16("v2"),
+ /*installer=*/String16(""), /*certificateHash=*/{});
+
+ std::vector<std::unique_ptr<LogEvent>> events;
+
+ events.push_back(CreateRestrictedLogEvent(atomTag, configAddedTimeNs + 100));
+
+ // Send log events to StatsLogProcessor.
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get());
+ }
+
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ processor->querySql(query.str(), /*minSqlClientVersion=*/0,
+ /*policyConfig=*/{}, mockStatsQueryCallback,
+ /*configKey=*/configId, /*configPackage=*/config_package_name,
+ /*callingUid=*/delegate_uid);
+
+ EXPECT_EQ(rowCountResult, 1);
+ EXPECT_THAT(queryDataResult, ElementsAre(to_string(atomTag), to_string(configAddedTimeNs + 100),
+ _, // wallClockNs
+ _ // field_1
+ ));
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestOneEventStaticUid) {
+ ConfigKey key2(2000, configId); // shell uid
+ processor->OnConfigUpdated(configAddedTimeNs + 1 * NS_PER_SEC, key2, config);
+
+ std::vector<std::unique_ptr<LogEvent>> events;
+
+ events.push_back(CreateRestrictedLogEvent(atomTag, configAddedTimeNs + 100));
+
+ // Send log events to StatsLogProcessor.
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get());
+ }
+
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ processor->querySql(query.str(), /*minSqlClientVersion=*/0,
+ /*policyConfig=*/{}, mockStatsQueryCallback,
+ /*configKey=*/configId, /*configPackage=*/"AID_SHELL",
+ /*callingUid=*/delegate_uid);
+
+ EXPECT_EQ(rowCountResult, 1);
+ EXPECT_THAT(queryDataResult, ElementsAre(to_string(atomTag), to_string(configAddedTimeNs + 100),
+ _, // wallClockNs
+ _ // field_1
+ ));
+ dbutils::deleteDb(key2);
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestTooManyConfigsAmbiguousQuery) {
+ ConfigKey key2(config_app_uid + 1, configId);
+ processor->OnConfigUpdated(configAddedTimeNs + 1 * NS_PER_SEC, key2, config);
+
+ uidMap->updateApp(configAddedTimeNs, String16(delegate_package_name.c_str()),
+ /*uid=*/delegate_uid + 1, /*versionCode=*/1,
+ /*versionString=*/String16("v2"),
+ /*installer=*/String16(""), /*certificateHash=*/{});
+ uidMap->updateApp(configAddedTimeNs + 1, String16(config_package_name.c_str()),
+ /*uid=*/config_app_uid + 1, /*versionCode=*/1,
+ /*versionString=*/String16("v2"),
+ /*installer=*/String16(""), /*certificateHash=*/{});
+
+ std::vector<std::unique_ptr<LogEvent>> events;
+
+ events.push_back(CreateRestrictedLogEvent(atomTag, configAddedTimeNs + 100));
+
+ // Send log events to StatsLogProcessor.
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get());
+ }
+
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ processor->querySql(query.str(), /*minSqlClientVersion=*/0,
+ /*policyConfig=*/{}, mockStatsQueryCallback,
+ /*configKey=*/configId, /*configPackage=*/config_package_name,
+ /*callingUid=*/delegate_uid);
+
+ EXPECT_EQ(error, "Ambiguous ConfigKey");
+ dbutils::deleteDb(key2);
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestUnknownConfigPackage) {
+ std::vector<std::unique_ptr<LogEvent>> events;
+
+ events.push_back(CreateRestrictedLogEvent(atomTag, configAddedTimeNs + 100));
+
+ // Send log events to StatsLogProcessor.
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get());
+ }
+
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ processor->querySql(query.str(), /*minSqlClientVersion=*/0,
+ /*policyConfig=*/{}, mockStatsQueryCallback,
+ /*configKey=*/configId, /*configPackage=*/"unknown.config.package",
+ /*callingUid=*/delegate_uid);
+
+ EXPECT_EQ(error, "No configs found matching the config key");
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestUnknownDelegatePackage) {
+ std::vector<std::unique_ptr<LogEvent>> events;
+
+ events.push_back(CreateRestrictedLogEvent(atomTag, configAddedTimeNs + 100));
+
+ // Send log events to StatsLogProcessor.
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get());
+ }
+
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ processor->querySql(query.str(), /*minSqlClientVersion=*/0,
+ /*policyConfig=*/{}, mockStatsQueryCallback,
+ /*configKey=*/configId, /*configPackage=*/config_package_name,
+ /*callingUid=*/delegate_uid + 1);
+
+ EXPECT_EQ(error, "No matching configs for restricted metrics delegate");
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestUnsupportedDatabaseVersion) {
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ processor->querySql(query.str(), /*minSqlClientVersion=*/INT_MAX,
+ /*policyConfig=*/{}, mockStatsQueryCallback,
+ /*configKey=*/configId, /*configPackage=*/config_package_name,
+ /*callingUid=*/delegate_uid);
+
+ EXPECT_THAT(error, StartsWith("Unsupported sqlite version"));
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestInvalidQuery) {
+ std::vector<std::unique_ptr<LogEvent>> events;
+
+ events.push_back(CreateRestrictedLogEvent(atomTag, configAddedTimeNs + 100));
+
+ // Send log events to StatsLogProcessor.
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get());
+ }
+
+ std::stringstream query;
+ query << "SELECT * FROM invalid_metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ processor->querySql(query.str(), /*minSqlClientVersion=*/0,
+ /*policyConfig=*/{}, mockStatsQueryCallback,
+ /*configKey=*/configId, /*configPackage=*/config_package_name,
+ /*callingUid=*/delegate_uid);
+
+ EXPECT_THAT(error, StartsWith("failed to query db"));
+}
+
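+// Events whose wall-clock timestamps are older than the TTL are dropped when TTLs are enforced.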
+TEST_F(RestrictedEventMetricE2eTest, TestEnforceTtlRemovesOldEvents) {
+ int64_t currentWallTimeNs = getWallClockNs();
+    // 8 days is used here because the TTL threshold is 7 days.
+ int64_t eightDaysAgo = currentWallTimeNs - 8 * 24 * 3600 * NS_PER_SEC;
+ int64_t originalEventElapsedTime = configAddedTimeNs + 100;
+ std::unique_ptr<LogEvent> event1 = CreateRestrictedLogEvent(atomTag, originalEventElapsedTime);
+ event1->setLogdWallClockTimestampNs(eightDaysAgo);
+
+ // Send log events to StatsLogProcessor.
+ processor->OnLogEvent(event1.get(), originalEventElapsedTime);
+ processor->WriteDataToDisk(DEVICE_SHUTDOWN, FAST, originalEventElapsedTime + 20 * NS_PER_SEC,
+ getWallClockNs());
+ processor->EnforceDataTtls(currentWallTimeNs, originalEventElapsedTime + 100);
+
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ string err;
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ EXPECT_TRUE(dbutils::query(configKey, query.str(), rows, columnTypes, columnNames, err));
+ ASSERT_EQ(rows.size(), 0);
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestConfigRemovalDeletesData) {
+ std::vector<std::unique_ptr<LogEvent>> events;
+
+ events.push_back(CreateRestrictedLogEvent(atomTag, configAddedTimeNs + 100));
+
+ // Send log events to StatsLogProcessor.
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get());
+ }
+ // Query to make sure data is flushed
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ processor->querySql(query.str(), /*minSqlClientVersion=*/0,
+ /*policyConfig=*/{}, mockStatsQueryCallback,
+ /*configKey=*/configId, /*configPackage=*/config_package_name,
+ /*callingUid=*/delegate_uid);
+
+ processor->OnConfigRemoved(configKey);
+
+ string err;
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ EXPECT_FALSE(dbutils::query(configKey, query.str(), rows, columnTypes, columnNames, err));
+
+ EXPECT_THAT(err, StartsWith("unable to open database file"));
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestConfigRemovalDeletesDataWithoutFlush) {
+ std::vector<std::unique_ptr<LogEvent>> events;
+
+ events.push_back(CreateRestrictedLogEvent(atomTag, configAddedTimeNs + 100));
+
+ // Send log events to StatsLogProcessor.
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get());
+ }
+ processor->OnConfigRemoved(configKey);
+
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ string err;
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ EXPECT_FALSE(dbutils::query(configKey, query.str(), rows, columnTypes, columnNames, err));
+
+ EXPECT_THAT(err, StartsWith("unable to open database file"));
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestConfigUpdateRestrictedDelegateCleared) {
+ std::vector<std::unique_ptr<LogEvent>> events;
+ events.push_back(CreateRestrictedLogEvent(atomTag, configAddedTimeNs + 100));
+
+ // Send log events to StatsLogProcessor.
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get());
+ }
+
+ // Update the existing config with no delegate
+ config.clear_restricted_metrics_delegate_package_name();
+ processor->OnConfigUpdated(configAddedTimeNs + 1 * NS_PER_SEC, configKey, config);
+
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ string err;
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ EXPECT_FALSE(dbutils::query(configKey, query.str(), rows, columnTypes, columnNames, err));
+ EXPECT_EQ(rows.size(), 0);
+ EXPECT_THAT(err, StartsWith("unable to open database file"));
+ dbutils::deleteDb(configKey);
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestNonModularConfigUpdateRestrictedDelegate) {
+ std::vector<std::unique_ptr<LogEvent>> events;
+ events.push_back(CreateRestrictedLogEvent(atomTag, configAddedTimeNs + 100));
+
+ // Send log events to StatsLogProcessor.
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get());
+ }
+
+ // Update the existing config without modular update
+ processor->OnConfigUpdated(configAddedTimeNs + 1 * NS_PER_SEC, configKey, config, false);
+
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ string err;
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ EXPECT_FALSE(dbutils::query(configKey, query.str(), rows, columnTypes, columnNames, err));
+ EXPECT_EQ(rows.size(), 0);
+ EXPECT_THAT(err, StartsWith("no such table"));
+ dbutils::deleteDb(configKey);
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestModularConfigUpdateNewRestrictedDelegate) {
+ config.clear_restricted_metrics_delegate_package_name();
+ // Update the existing config without a restricted delegate
+ processor->OnConfigUpdated(configAddedTimeNs + 10, configKey, config);
+
+ // Update the existing config with a new restricted delegate
+ config.set_restricted_metrics_delegate_package_name("new.delegate.package");
+ processor->OnConfigUpdated(configAddedTimeNs + 1 * NS_PER_SEC, configKey, config);
+
+ std::vector<std::unique_ptr<LogEvent>> events;
+ events.push_back(CreateRestrictedLogEvent(atomTag, configAddedTimeNs + 2 * NS_PER_SEC));
+
+ // Send log events to StatsLogProcessor.
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get());
+ }
+
+ uint64_t dumpTimeNs = configAddedTimeNs + 100 * NS_PER_SEC;
+ ConfigMetricsReportList reports;
+ vector<uint8_t> buffer;
+ processor->onDumpReport(configKey, dumpTimeNs, true, true, ADB_DUMP, FAST, &buffer);
+ EXPECT_TRUE(reports.ParseFromArray(&buffer[0], buffer.size()));
+ ASSERT_EQ(reports.reports_size(), 0);
+
+ // Assert the config update was not modular and a RestrictedEventMetricProducer was created.
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ string err;
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ EXPECT_TRUE(dbutils::query(configKey, query.str(), rows, columnTypes, columnNames, err));
+ EXPECT_EQ(rows.size(), 1);
+ EXPECT_THAT(rows[0],
+ ElementsAre(to_string(atomTag), to_string(configAddedTimeNs + 2 * NS_PER_SEC),
+ _, // wallClockNs
+ _ // field_1
+ ));
+ EXPECT_THAT(columnNames,
+ ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs", "field_1"));
+ EXPECT_THAT(columnTypes,
+ ElementsAre(SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER));
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestModularConfigUpdateChangeRestrictedDelegate) {
+ std::vector<std::unique_ptr<LogEvent>> events;
+ events.push_back(CreateRestrictedLogEvent(atomTag, configAddedTimeNs + 100));
+
+ // Send log events to StatsLogProcessor.
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get());
+ }
+
+ // Update the existing config with a new restricted delegate
+ int32_t newDelegateUid = delegate_uid + 1;
+ config.set_restricted_metrics_delegate_package_name("new.delegate.package");
+ uidMap->updateApp(configAddedTimeNs, String16("new.delegate.package"),
+ /*uid=*/newDelegateUid, /*versionCode=*/1,
+ /*versionString=*/String16("v2"),
+ /*installer=*/String16(""), /*certificateHash=*/{});
+ processor->OnConfigUpdated(configAddedTimeNs + 1 * NS_PER_SEC, configKey, config);
+
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ processor->querySql(query.str(), /*minSqlClientVersion=*/0,
+ /*policyConfig=*/{}, mockStatsQueryCallback,
+ /*configKey=*/configId, /*configPackage=*/config_package_name,
+ /*callingUid=*/newDelegateUid);
+
+ EXPECT_EQ(rowCountResult, 1);
+ EXPECT_THAT(queryDataResult, ElementsAre(to_string(atomTag), to_string(configAddedTimeNs + 100),
+ _, // wallClockNs
+ _ // field_1
+ ));
+ EXPECT_THAT(columnNamesResult,
+ ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs", "field_1"));
+ EXPECT_THAT(columnTypesResult,
+ ElementsAre(SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER));
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestInvalidConfigUpdateRestrictedDelegate) {
+ std::vector<std::unique_ptr<LogEvent>> events;
+ events.push_back(CreateRestrictedLogEvent(atomTag, configAddedTimeNs + 100));
+
+ // Send log events to StatsLogProcessor.
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get());
+ }
+
+ EventMetric metricWithoutMatcher = createEventMetric("metricWithoutMatcher", 999999, nullopt);
+ *config.add_event_metric() = metricWithoutMatcher;
+ // Update the existing config with an invalid config update
+ processor->OnConfigUpdated(configAddedTimeNs + 1 * NS_PER_SEC, configKey, config);
+
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ string err;
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ EXPECT_FALSE(dbutils::query(configKey, query.str(), rows, columnTypes, columnNames, err));
+ EXPECT_EQ(rows.size(), 0);
+ EXPECT_THAT(err, StartsWith("unable to open database file"));
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestRestrictedConfigUpdateDoesNotUpdateUidMap) {
+ auto& configKeyMap = processor->getUidMap()->mLastUpdatePerConfigKey;
+ EXPECT_EQ(configKeyMap.find(configKey), configKeyMap.end());
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestRestrictedConfigUpdateAddsDelegateRemovesUidMapEntry) {
+ auto& configKeyMap = processor->getUidMap()->mLastUpdatePerConfigKey;
+ config.clear_restricted_metrics_delegate_package_name();
+ // Update the existing config without a restricted delegate
+ processor->OnConfigUpdated(configAddedTimeNs + 1 * NS_PER_SEC, configKey, config);
+ EXPECT_NE(configKeyMap.find(configKey), configKeyMap.end());
+ // Update the existing config with a new restricted delegate
+ config.set_restricted_metrics_delegate_package_name(delegate_package_name.c_str());
+ processor->OnConfigUpdated(configAddedTimeNs + 1 * NS_PER_SEC, configKey, config);
+ EXPECT_EQ(configKeyMap.find(configKey), configKeyMap.end());
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestLogEventsEnforceTtls) {
+ int64_t currentWallTimeNs = getWallClockNs();
+ int64_t originalEventElapsedTime = configAddedTimeNs + 100;
+ // 2 hours used here because the TTL check period is 1 hour.
+ int64_t newEventElapsedTime = configAddedTimeNs + 2 * 3600 * NS_PER_SEC + 1; // 2 hrs later
+ std::unique_ptr<LogEvent> event1 = CreateRestrictedLogEvent(atomTag, originalEventElapsedTime);
+ event1->setLogdWallClockTimestampNs(eightDaysAgo);
+ std::unique_ptr<LogEvent> event2 =
+ CreateRestrictedLogEvent(atomTag, originalEventElapsedTime + 100);
+ event2->setLogdWallClockTimestampNs(oneDayAgo);
+ std::unique_ptr<LogEvent> event3 = CreateRestrictedLogEvent(atomTag, newEventElapsedTime);
+ event3->setLogdWallClockTimestampNs(currentWallTimeNs);
+
+ processor->mLastTtlTime = originalEventElapsedTime;
+ // Send log events to StatsLogProcessor.
+ processor->OnLogEvent(event1.get(), originalEventElapsedTime);
+ processor->OnLogEvent(event2.get(), newEventElapsedTime);
+ processor->OnLogEvent(event3.get(), newEventElapsedTime + 100);
+ processor->flushRestrictedDataLocked(newEventElapsedTime);
+
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ string err;
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ EXPECT_TRUE(dbutils::query(configKey, query.str(), rows, columnTypes, columnNames, err));
+ ASSERT_EQ(rows.size(), 2);
+ EXPECT_THAT(columnNames,
+ ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs", "field_1"));
+ EXPECT_THAT(columnTypes,
+ ElementsAre(SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER));
+ EXPECT_THAT(rows[0], ElementsAre(to_string(atomTag), to_string(originalEventElapsedTime + 100),
+ to_string(oneDayAgo), _));
+ EXPECT_THAT(rows[1], ElementsAre(to_string(atomTag), to_string(newEventElapsedTime),
+ to_string(currentWallTimeNs), _));
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestLogEventsDoesNotEnforceTtls) {
+ int64_t currentWallTimeNs = getWallClockNs();
+ int64_t originalEventElapsedTime = configAddedTimeNs + 100;
+ // 30 min used here because the TTL check period is 1 hour.
+ int64_t newEventElapsedTime = configAddedTimeNs + (3600 * NS_PER_SEC) / 2; // 30 min later
+ std::unique_ptr<LogEvent> event1 = CreateRestrictedLogEvent(atomTag, originalEventElapsedTime);
+ event1->setLogdWallClockTimestampNs(eightDaysAgo);
+ std::unique_ptr<LogEvent> event2 = CreateRestrictedLogEvent(atomTag, newEventElapsedTime);
+ event2->setLogdWallClockTimestampNs(currentWallTimeNs);
+
+ processor->mLastTtlTime = originalEventElapsedTime;
+ // Send log events to StatsLogProcessor.
+ processor->OnLogEvent(event1.get(), originalEventElapsedTime);
+ processor->OnLogEvent(event2.get(), newEventElapsedTime);
+ processor->flushRestrictedDataLocked(newEventElapsedTime);
+
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ string err;
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ EXPECT_TRUE(dbutils::query(configKey, query.str(), rows, columnTypes, columnNames, err));
+ ASSERT_EQ(rows.size(), 2);
+ EXPECT_THAT(columnNames,
+ ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs", "field_1"));
+ EXPECT_THAT(columnTypes,
+ ElementsAre(SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER));
+ EXPECT_THAT(rows[0], ElementsAre(to_string(atomTag), to_string(originalEventElapsedTime),
+ to_string(eightDaysAgo), _));
+ EXPECT_THAT(rows[1], ElementsAre(to_string(atomTag), to_string(newEventElapsedTime),
+ to_string(currentWallTimeNs), _));
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestQueryEnforceTtls) {
+ int64_t currentWallTimeNs = getWallClockNs();
+ int64_t originalEventElapsedTime = configAddedTimeNs + 100;
+ // 30 min used here because the TTL check period is 1 hour.
+ int64_t newEventElapsedTime = configAddedTimeNs + (3600 * NS_PER_SEC) / 2; // 30 min later
+ std::unique_ptr<LogEvent> event1 = CreateRestrictedLogEvent(atomTag, originalEventElapsedTime);
+ event1->setLogdWallClockTimestampNs(eightDaysAgo);
+ std::unique_ptr<LogEvent> event2 = CreateRestrictedLogEvent(atomTag, newEventElapsedTime);
+ event2->setLogdWallClockTimestampNs(currentWallTimeNs);
+
+ processor->mLastTtlTime = originalEventElapsedTime;
+ // Send log events to StatsLogProcessor.
+ processor->OnLogEvent(event1.get(), originalEventElapsedTime);
+ processor->OnLogEvent(event2.get(), newEventElapsedTime);
+
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ processor->querySql(query.str(), /*minSqlClientVersion=*/0,
+ /*policyConfig=*/{}, mockStatsQueryCallback,
+ /*configKey=*/configId, /*configPackage=*/config_package_name,
+ /*callingUid=*/delegate_uid);
+
+ EXPECT_EQ(rowCountResult, 1);
+ EXPECT_THAT(queryDataResult, ElementsAre(to_string(atomTag), to_string(newEventElapsedTime),
+ to_string(currentWallTimeNs),
+ _ // field_1
+ ));
+ EXPECT_THAT(columnNamesResult,
+ ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs", "field_1"));
+ EXPECT_THAT(columnTypesResult,
+ ElementsAre(SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER));
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestNotFlushed) {
+ std::vector<std::unique_ptr<LogEvent>> events;
+ events.push_back(CreateRestrictedLogEvent(atomTag, configAddedTimeNs + 100));
+ // Send log events to StatsLogProcessor.
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get(), event->GetElapsedTimestampNs());
+ }
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ string err;
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ EXPECT_FALSE(dbutils::query(configKey, query.str(), rows, columnTypes, columnNames, err));
+ EXPECT_EQ(rows.size(), 0);
+}
+
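+// Once the DB guardrail enforcement time has passed, the restricted DB file for the config is
+// deleted.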
+TEST_F(RestrictedEventMetricE2eTest, TestEnforceDbGuardrails) {
+ int64_t currentWallTimeNs = getWallClockNs();
+ int64_t originalEventElapsedTime =
+ configAddedTimeNs + (3600 * NS_PER_SEC) * 2; // 2 hours after boot
+ // 2 hours used here because the TTL check period is 1 hour.
+ int64_t dbEnforcementTimeNs =
+ configAddedTimeNs + (3600 * NS_PER_SEC) * 4; // 4 hours after boot
+ std::unique_ptr<LogEvent> event1 = CreateRestrictedLogEvent(atomTag, originalEventElapsedTime);
+ event1->setLogdWallClockTimestampNs(currentWallTimeNs);
+ // Send log events to StatsLogProcessor.
+ processor->OnLogEvent(event1.get(), originalEventElapsedTime);
+
+ EXPECT_TRUE(StorageManager::hasFile(
+ base::StringPrintf("%s/%s", STATS_RESTRICTED_DATA_DIR, "123_12345.db").c_str()));
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ processor->querySql(query.str(), /*minSqlClientVersion=*/0,
+ /*policyConfig=*/{}, mockStatsQueryCallback,
+ /*configKey=*/configId, /*configPackage=*/config_package_name,
+ /*callingUid=*/delegate_uid);
+ EXPECT_EQ(rowCountResult, 1);
+ EXPECT_THAT(queryDataResult,
+ ElementsAre(to_string(atomTag), to_string(originalEventElapsedTime),
+ to_string(currentWallTimeNs),
+ _ // field_1
+ ));
+ EXPECT_THAT(columnNamesResult,
+ ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs", "field_1"));
+ EXPECT_THAT(columnTypesResult,
+ ElementsAre(SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER));
+
+ processor->enforceDbGuardrailsIfNecessaryLocked(oneMonthLater, dbEnforcementTimeNs);
+
+ EXPECT_FALSE(StorageManager::hasFile(
+ base::StringPrintf("%s/%s", STATS_RESTRICTED_DATA_DIR, "123_12345.db").c_str()));
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestEnforceDbGuardrailsDoesNotDeleteBeforeGuardrail) {
+ int64_t currentWallTimeNs = getWallClockNs();
+ int64_t originalEventElapsedTime =
+ configAddedTimeNs + (3600 * NS_PER_SEC) * 2; // 2 hours after boot
+ // 2 hours used here because the TTL check period is 1 hour.
+ std::unique_ptr<LogEvent> event1 = CreateRestrictedLogEvent(atomTag, originalEventElapsedTime);
+ event1->setLogdWallClockTimestampNs(currentWallTimeNs);
+ // Send log events to StatsLogProcessor.
+ processor->OnLogEvent(event1.get(), originalEventElapsedTime);
+
+ EXPECT_TRUE(StorageManager::hasFile(
+ base::StringPrintf("%s/%s", STATS_RESTRICTED_DATA_DIR, "123_12345.db").c_str()));
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ processor->querySql(query.str(), /*minSqlClientVersion=*/0,
+ /*policyConfig=*/{}, mockStatsQueryCallback,
+ /*configKey=*/configId, /*configPackage=*/config_package_name,
+ /*callingUid=*/delegate_uid);
+ EXPECT_EQ(rowCountResult, 1);
+ EXPECT_THAT(queryDataResult,
+ ElementsAre(to_string(atomTag), to_string(originalEventElapsedTime),
+ to_string(currentWallTimeNs),
+ _ // field_1
+ ));
+ EXPECT_THAT(columnNamesResult,
+ ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs", "field_1"));
+ EXPECT_THAT(columnTypesResult,
+ ElementsAre(SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER));
+
+ processor->enforceDbGuardrailsIfNecessaryLocked(oneMonthLater, originalEventElapsedTime);
+
+ EXPECT_TRUE(StorageManager::hasFile(
+ base::StringPrintf("%s/%s", STATS_RESTRICTED_DATA_DIR, "123_12345.db").c_str()));
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestFlushInWriteDataToDisk) {
+ std::vector<std::unique_ptr<LogEvent>> events;
+ events.push_back(CreateRestrictedLogEvent(atomTag, configAddedTimeNs + 100));
+ // Send log events to StatsLogProcessor.
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get(), event->GetElapsedTimestampNs());
+ }
+
+ // Call WriteDataToDisk after 20 seconds because the cooldown period is 15 seconds.
+ processor->WriteDataToDisk(DEVICE_SHUTDOWN, FAST, 20 * NS_PER_SEC, getWallClockNs());
+
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ string err;
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ EXPECT_TRUE(dbutils::query(configKey, query.str(), rows, columnTypes, columnNames, err));
+ EXPECT_EQ(rows.size(), 1);
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestFlushPeriodically) {
+ std::vector<std::unique_ptr<LogEvent>> events;
+
+ events.push_back(CreateRestrictedLogEvent(atomTag, configAddedTimeNs + 100));
+ events.push_back(CreateRestrictedLogEvent(
+ atomTag, configAddedTimeNs + StatsdStats::kMinFlushRestrictedPeriodNs + 1));
+
+ // Send log events to StatsLogProcessor.
+ for (auto& event : events) {
+ processor->OnLogEvent(event.get(), event->GetElapsedTimestampNs());
+ }
+
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ string err;
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ EXPECT_TRUE(dbutils::query(configKey, query.str(), rows, columnTypes, columnNames, err));
+ // Only the first event is flushed when the second event is logged.
+ EXPECT_EQ(rows.size(), 1);
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestOnLogEventMalformedDbNameDeleted) {
+ vector<string> emptyData;
+ string fileName = StringPrintf("%s/malformedname.db", STATS_RESTRICTED_DATA_DIR);
+ StorageManager::writeFile(fileName.c_str(), emptyData.data(), emptyData.size());
+ EXPECT_TRUE(StorageManager::hasFile(fileName.c_str()));
+ int64_t originalEventElapsedTime = configAddedTimeNs + 100;
+ // 2 hours used here because the TTL check period is 1 hour.
+ int64_t newEventElapsedTime = configAddedTimeNs + 2 * 3600 * NS_PER_SEC + 1; // 2 hrs later
+ std::unique_ptr<LogEvent> event2 = CreateRestrictedLogEvent(atomTag, newEventElapsedTime);
+ event2->setLogdWallClockTimestampNs(getWallClockNs());
+
+ processor->mLastTtlTime = originalEventElapsedTime;
+ // Send log events to StatsLogProcessor.
+ processor->OnLogEvent(event2.get(), newEventElapsedTime);
+
+ EXPECT_FALSE(StorageManager::hasFile(fileName.c_str()));
+ StorageManager::deleteFile(fileName.c_str());
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestRestrictedMetricSavesTtlToDisk) {
+ metadata::StatsMetadataList result;
+ processor->WriteMetadataToProto(getWallClockNs(), configAddedTimeNs, &result);
+
+ ASSERT_EQ(result.stats_metadata_size(), 1);
+ metadata::StatsMetadata statsMetadata = result.stats_metadata(0);
+ EXPECT_EQ(statsMetadata.config_key().config_id(), configId);
+ EXPECT_EQ(statsMetadata.config_key().uid(), config_app_uid);
+
+ ASSERT_EQ(statsMetadata.metric_metadata_size(), 1);
+ metadata::MetricMetadata metricMetadata = statsMetadata.metric_metadata(0);
+ EXPECT_EQ(metricMetadata.metric_id(), restrictedMetricId);
+ EXPECT_EQ(metricMetadata.restricted_category(), CATEGORY_UNKNOWN);
+ result.Clear();
+
+ std::unique_ptr<LogEvent> event = CreateRestrictedLogEvent(atomTag, configAddedTimeNs + 100);
+ processor->OnLogEvent(event.get());
+ processor->WriteMetadataToProto(getWallClockNs(), configAddedTimeNs, &result);
+
+ ASSERT_EQ(result.stats_metadata_size(), 1);
+ statsMetadata = result.stats_metadata(0);
+ EXPECT_EQ(statsMetadata.config_key().config_id(), configId);
+ EXPECT_EQ(statsMetadata.config_key().uid(), config_app_uid);
+
+ ASSERT_EQ(statsMetadata.metric_metadata_size(), 1);
+ metricMetadata = statsMetadata.metric_metadata(0);
+ EXPECT_EQ(metricMetadata.metric_id(), restrictedMetricId);
+ EXPECT_EQ(metricMetadata.restricted_category(), CATEGORY_DIAGNOSTIC);
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestRestrictedMetricLoadsTtlFromDisk) {
+ int64_t currentWallTimeNs = getWallClockNs();
+ int64_t originalEventElapsedTime = configAddedTimeNs + 100;
+ std::unique_ptr<LogEvent> event1 = CreateRestrictedLogEvent(atomTag, originalEventElapsedTime);
+ event1->setLogdWallClockTimestampNs(eightDaysAgo);
+ processor->OnLogEvent(event1.get(), originalEventElapsedTime);
+ processor->flushRestrictedDataLocked(originalEventElapsedTime);
+ int64_t wallClockNs = 1584991200 * NS_PER_SEC; // random time
+ int64_t metadataWriteTime = originalEventElapsedTime + 5000 * NS_PER_SEC;
+ processor->SaveMetadataToDisk(wallClockNs, metadataWriteTime);
+
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ string err;
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ EXPECT_TRUE(dbutils::query(configKey, query.str(), rows, columnTypes, columnNames, err));
+ ASSERT_EQ(rows.size(), 1);
+ EXPECT_THAT(columnNames,
+ ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs", "field_1"));
+ EXPECT_THAT(columnTypes,
+ ElementsAre(SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER));
+ EXPECT_THAT(rows[0], ElementsAre(to_string(atomTag), to_string(originalEventElapsedTime),
+ to_string(eightDaysAgo), _));
+
+ auto processor2 =
+ CreateStatsLogProcessor(/*baseTimeNs=*/0, configAddedTimeNs, config, configKey,
+ /*puller=*/nullptr, /*atomTag=*/0, uidMap);
+ // 2 hours used here because the TTL check period is 1 hour.
+ int64_t newEventElapsedTime = configAddedTimeNs + 2 * 3600 * NS_PER_SEC + 1; // 2 hrs later
+ processor2->LoadMetadataFromDisk(wallClockNs, newEventElapsedTime);
+
+ // Log another event and check that the original TTL is maintained across the reboot.
+ std::unique_ptr<LogEvent> event2 = CreateRestrictedLogEvent(atomTag, newEventElapsedTime);
+ event2->setLogdWallClockTimestampNs(currentWallTimeNs);
+ processor2->OnLogEvent(event2.get(), newEventElapsedTime);
+ processor2->flushRestrictedDataLocked(newEventElapsedTime);
+
+ columnTypes.clear();
+ columnNames.clear();
+ rows.clear();
+ EXPECT_TRUE(dbutils::query(configKey, query.str(), rows, columnTypes, columnNames, err));
+ ASSERT_EQ(rows.size(), 1);
+ EXPECT_THAT(columnNames,
+ ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs", "field_1"));
+ EXPECT_THAT(columnTypes,
+ ElementsAre(SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER));
+ EXPECT_THAT(rows[0], ElementsAre(to_string(atomTag), to_string(newEventElapsedTime),
+ to_string(currentWallTimeNs), _));
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestNewRestrictionCategoryEventDeletesTable) {
+ int64_t currentWallTimeNs = getWallClockNs();
+ int64_t originalEventElapsedTime = configAddedTimeNs + 100;
+ std::unique_ptr<LogEvent> event1 =
+ CreateNonRestrictedLogEvent(atomTag, originalEventElapsedTime);
+ processor->OnLogEvent(event1.get());
+
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << dbutils::reformatMetricId(restrictedMetricId);
+ processor->querySql(query.str(), /*minSqlClientVersion=*/0,
+ /*policyConfig=*/{}, mockStatsQueryCallback,
+ /*configKey=*/configId, /*configPackage=*/config_package_name,
+ /*callingUid=*/delegate_uid);
+ EXPECT_EQ(rowCountResult, 1);
+ EXPECT_THAT(queryDataResult,
+ ElementsAre(to_string(atomTag), to_string(originalEventElapsedTime),
+ _, // wallTimestampNs
+ _ // field_1
+ ));
+ EXPECT_THAT(columnNamesResult,
+ ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs", "field_1"));
+ EXPECT_THAT(columnTypesResult,
+ ElementsAre(SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER));
+
+ // Log a second event that will go into the cache
+ std::unique_ptr<LogEvent> event2 =
+ CreateNonRestrictedLogEvent(atomTag, originalEventElapsedTime + 100);
+ processor->OnLogEvent(event2.get());
+
+ // Log a third event with a different category
+ std::unique_ptr<LogEvent> event3 =
+ CreateRestrictedLogEvent(atomTag, originalEventElapsedTime + 200);
+ processor->OnLogEvent(event3.get());
+
+ processor->querySql(query.str(), /*minSqlClientVersion=*/0,
+ /*policyConfig=*/{}, mockStatsQueryCallback,
+ /*configKey=*/configId, /*configPackage=*/config_package_name,
+ /*callingUid=*/delegate_uid);
+ EXPECT_EQ(rowCountResult, 1);
+ EXPECT_THAT(queryDataResult,
+ ElementsAre(to_string(atomTag), to_string(originalEventElapsedTime + 200),
+ _, // wallTimestampNs
+ _ // field_1
+ ));
+ EXPECT_THAT(columnNamesResult,
+ ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs", "field_1"));
+ EXPECT_THAT(columnTypesResult,
+ ElementsAre(SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER));
+}
+
+TEST_F(RestrictedEventMetricE2eTest, TestDeviceInfoTableCreated) {
+ std::string query = "SELECT * FROM device_info";
+ processor->querySql(query.c_str(), /*minSqlClientVersion=*/0,
+ /*policyConfig=*/{}, mockStatsQueryCallback,
+ /*configKey=*/configId, /*configPackage=*/config_package_name,
+ /*callingUid=*/delegate_uid);
+ EXPECT_EQ(rowCountResult, 1);
+ EXPECT_THAT(queryDataResult, ElementsAre(_, _, _, _, _, _, _, _, _, _));
+ EXPECT_THAT(columnNamesResult,
+ ElementsAre("sdkVersion", "model", "product", "hardware", "device", "osBuild",
+ "fingerprint", "brand", "manufacturer", "board"));
+ EXPECT_THAT(columnTypesResult,
+ ElementsAre(SQLITE_INTEGER, SQLITE_TEXT, SQLITE_TEXT, SQLITE_TEXT, SQLITE_TEXT,
+ SQLITE_TEXT, SQLITE_TEXT, SQLITE_TEXT, SQLITE_TEXT, SQLITE_TEXT));
+}
+#else
+GTEST_LOG_(INFO) << "This test does nothing.\n";
+#endif
+
+} // namespace statsd
+} // namespace os
+} // namespace android
diff --git a/statsd/tests/e2e/ValueMetric_pull_e2e_test.cpp b/statsd/tests/e2e/ValueMetric_pull_e2e_test.cpp
index 1ea757a..1d8307b 100644
--- a/statsd/tests/e2e/ValueMetric_pull_e2e_test.cpp
+++ b/statsd/tests/e2e/ValueMetric_pull_e2e_test.cpp
@@ -15,12 +15,12 @@
#include <android/binder_interface_utils.h>
#include <gtest/gtest.h>
+#include <vector>
+
#include "src/StatsLogProcessor.h"
#include "src/stats_log_util.h"
#include "tests/statsd_test_util.h"
-#include <vector>
-
using ::ndk::SharedRefBase;
namespace android {
@@ -60,6 +60,8 @@
valueMetric->set_use_absolute_value_on_reset(true);
valueMetric->set_skip_zero_diff_output(false);
valueMetric->set_max_pull_delay_sec(INT_MAX);
+ valueMetric->set_split_bucket_for_app_upgrade(true);
+ valueMetric->set_min_bucket_size_nanos(1000);
return config;
}
@@ -301,6 +303,23 @@
EXPECT_EQ(baseTimeNs + 7 * bucketSizeNs, data.bucket_info(3).start_bucket_elapsed_nanos());
EXPECT_EQ(baseTimeNs + 8 * bucketSizeNs, data.bucket_info(3).end_bucket_elapsed_nanos());
ASSERT_EQ(1, data.bucket_info(3).values_size());
+
+ valueMetrics = reports.reports(0).metrics(0).value_metrics();
+ ASSERT_EQ(2, valueMetrics.skipped_size());
+
+ StatsLogReport::SkippedBuckets skipped = valueMetrics.skipped(0);
+ EXPECT_EQ(BucketDropReason::CONDITION_UNKNOWN, skipped.drop_event(0).drop_reason());
+ EXPECT_EQ(MillisToNano(NanoToMillis(baseTimeNs + 2 * bucketSizeNs)),
+ skipped.start_bucket_elapsed_nanos());
+ EXPECT_EQ(MillisToNano(NanoToMillis(baseTimeNs + 3 * bucketSizeNs)),
+ skipped.end_bucket_elapsed_nanos());
+
+ skipped = valueMetrics.skipped(1);
+ EXPECT_EQ(BucketDropReason::NO_DATA, skipped.drop_event(0).drop_reason());
+ EXPECT_EQ(MillisToNano(NanoToMillis(baseTimeNs + 5 * bucketSizeNs)),
+ skipped.start_bucket_elapsed_nanos());
+ EXPECT_EQ(MillisToNano(NanoToMillis(baseTimeNs + 6 * bucketSizeNs)),
+ skipped.end_bucket_elapsed_nanos());
}
TEST(ValueMetricE2eTest, TestPulledEvents_LateAlarm) {
@@ -404,6 +423,30 @@
EXPECT_EQ(baseTimeNs + 9 * bucketSizeNs, data.bucket_info(2).start_bucket_elapsed_nanos());
EXPECT_EQ(baseTimeNs + 10 * bucketSizeNs, data.bucket_info(2).end_bucket_elapsed_nanos());
ASSERT_EQ(1, data.bucket_info(2).values_size());
+
+ valueMetrics = reports.reports(0).metrics(0).value_metrics();
+ ASSERT_EQ(3, valueMetrics.skipped_size());
+
+ StatsLogReport::SkippedBuckets skipped = valueMetrics.skipped(0);
+ EXPECT_EQ(BucketDropReason::CONDITION_UNKNOWN, skipped.drop_event(0).drop_reason());
+ EXPECT_EQ(MillisToNano(NanoToMillis(baseTimeNs + 2 * bucketSizeNs)),
+ skipped.start_bucket_elapsed_nanos());
+ EXPECT_EQ(MillisToNano(NanoToMillis(baseTimeNs + 5 * bucketSizeNs)),
+ skipped.end_bucket_elapsed_nanos());
+
+ skipped = valueMetrics.skipped(1);
+ EXPECT_EQ(BucketDropReason::NO_DATA, skipped.drop_event(0).drop_reason());
+ EXPECT_EQ(MillisToNano(NanoToMillis(baseTimeNs + 6 * bucketSizeNs)),
+ skipped.start_bucket_elapsed_nanos());
+ EXPECT_EQ(MillisToNano(NanoToMillis(baseTimeNs + 7 * bucketSizeNs)),
+ skipped.end_bucket_elapsed_nanos());
+
+ skipped = valueMetrics.skipped(2);
+ EXPECT_EQ(BucketDropReason::NO_DATA, skipped.drop_event(0).drop_reason());
+ EXPECT_EQ(MillisToNano(NanoToMillis(baseTimeNs + 7 * bucketSizeNs)),
+ skipped.start_bucket_elapsed_nanos());
+ EXPECT_EQ(MillisToNano(NanoToMillis(baseTimeNs + 8 * bucketSizeNs)),
+ skipped.end_bucket_elapsed_nanos());
}
TEST(ValueMetricE2eTest, TestPulledEvents_WithActivation) {
@@ -422,6 +465,8 @@
event_activation->set_atom_matcher_id(batterySaverStartMatcher.id());
event_activation->set_ttl_seconds(ttlNs / 1000000000);
+ StatsdStats::getInstance().reset();
+
ConfigKey cfgKey;
auto processor = CreateStatsLogProcessor(baseTimeNs, configAddedTimeNs, config, cfgKey,
SharedRefBase::make<FakeSubsystemSleepCallback>(),
@@ -430,10 +475,10 @@
EXPECT_TRUE(processor->mMetricsManagers.begin()->second->isConfigValid());
processor->mPullerManager->ForceClearPullerCache();
- int startBucketNum = processor->mMetricsManagers.begin()
- ->second->mAllMetricProducers[0]
- ->getCurrentBucketNum();
- EXPECT_GT(startBucketNum, (int64_t)0);
+ const int startBucketNum = processor->mMetricsManagers.begin()
+ ->second->mAllMetricProducers[0]
+ ->getCurrentBucketNum();
+ EXPECT_EQ(startBucketNum, 2);
EXPECT_FALSE(processor->mMetricsManagers.begin()->second->mAllMetricProducers[0]->isActive());
// When creating the config, the value metric producer should register the alarm at the
@@ -445,38 +490,110 @@
processor->mPullerManager->mReceivers.begin()->second.front().nextPullTimeNs;
EXPECT_EQ(baseTimeNs + startBucketNum * bucketSizeNs + bucketSizeNs, expectedPullTimeNs);
- // Pulling alarm arrives on time and reset the sequential pulling alarm.
+ // Initialize metric.
+ const int64_t metricInitTimeNs = configAddedTimeNs + 1; // 10 mins + 1 ns.
+ processor->onStatsdInitCompleted(metricInitTimeNs);
+
+ // Check no pull occurred since metric not active.
+ StatsdStatsReport_PulledAtomStats pulledAtomStats =
+ getPulledAtomStats(util::SUBSYSTEM_SLEEP_STATE);
+ EXPECT_EQ(pulledAtomStats.atom_id(), util::SUBSYSTEM_SLEEP_STATE);
+ EXPECT_EQ(pulledAtomStats.total_pull(), 0);
+
+ // Check a skipped bucket is not added when the metric is not active.
+ int64_t dumpReportTimeNs = metricInitTimeNs + 1; // 10 mins + 2 ns.
+ vector<uint8_t> buffer;
+ processor->onDumpReport(cfgKey, dumpReportTimeNs, true /* include_current_partial_bucket */,
+ true /* erase_data */, ADB_DUMP, FAST, &buffer);
+ ConfigMetricsReportList reports;
+ EXPECT_TRUE(buffer.size() > 0);
+ EXPECT_TRUE(reports.ParseFromArray(&buffer[0], buffer.size()));
+ ASSERT_EQ(1, reports.reports_size());
+ ASSERT_EQ(1, reports.reports(0).metrics_size());
+ StatsLogReport::ValueMetricDataWrapper valueMetrics =
+ reports.reports(0).metrics(0).value_metrics();
+ EXPECT_EQ(valueMetrics.skipped_size(), 0);
+
+ // App upgrade.
+ const int64_t appUpgradeTimeNs = dumpReportTimeNs + 1; // 10 mins + 3 ns.
+ processor->notifyAppUpgrade(appUpgradeTimeNs, "appName", 1000 /* uid */, 2 /* version */);
+
+ // Check no pull occurred since metric not active.
+ pulledAtomStats = getPulledAtomStats(util::SUBSYSTEM_SLEEP_STATE);
+ EXPECT_EQ(pulledAtomStats.atom_id(), util::SUBSYSTEM_SLEEP_STATE);
+ EXPECT_EQ(pulledAtomStats.total_pull(), 0);
+
+ // Check a skipped bucket is not added when the metric is not active.
+ dumpReportTimeNs = appUpgradeTimeNs + 1; // 10 mins + 4 ns.
+ buffer.clear();
+ processor->onDumpReport(cfgKey, dumpReportTimeNs, true /* include_current_partial_bucket */,
+ true /* erase_data */, ADB_DUMP, FAST, &buffer);
+ EXPECT_TRUE(buffer.size() > 0);
+ EXPECT_TRUE(reports.ParseFromArray(&buffer[0], buffer.size()));
+ ASSERT_EQ(1, reports.reports_size());
+ ASSERT_EQ(1, reports.reports(0).metrics_size());
+ valueMetrics = reports.reports(0).metrics(0).value_metrics();
+ EXPECT_EQ(valueMetrics.skipped_size(), 0);
+
+ // Dump report with a pull. The pull should not happen because metric is inactive.
+ dumpReportTimeNs = dumpReportTimeNs + 1; // 10 mins + 5 ns.
+ buffer.clear();
+ processor->onDumpReport(cfgKey, dumpReportTimeNs, true /* include_current_partial_bucket */,
+ true /* erase_data */, ADB_DUMP, NO_TIME_CONSTRAINTS, &buffer);
+ pulledAtomStats = getPulledAtomStats(util::SUBSYSTEM_SLEEP_STATE);
+ EXPECT_EQ(pulledAtomStats.atom_id(), util::SUBSYSTEM_SLEEP_STATE);
+ EXPECT_EQ(pulledAtomStats.total_pull(), 0);
+
+ // Check skipped bucket is not added from the dump operation when metric is not active.
+ EXPECT_TRUE(buffer.size() > 0);
+ EXPECT_TRUE(reports.ParseFromArray(&buffer[0], buffer.size()));
+ ASSERT_EQ(1, reports.reports_size());
+ ASSERT_EQ(1, reports.reports(0).metrics_size());
+ valueMetrics = reports.reports(0).metrics(0).value_metrics();
+ EXPECT_EQ(valueMetrics.skipped_size(), 0);
+
+ // Pulling alarm arrives on time and resets the sequential pulling alarm. This bucket is skipped.
processor->informPullAlarmFired(expectedPullTimeNs + 1); // 15 mins + 1 ns.
EXPECT_EQ(baseTimeNs + startBucketNum * bucketSizeNs + 2 * bucketSizeNs, expectedPullTimeNs);
EXPECT_FALSE(processor->mMetricsManagers.begin()->second->mAllMetricProducers[0]->isActive());
- // Activate the metric. A pull occurs here
+ // Activate the metric. A pull occurs here that sets the base.
+ // 15 mins + 2 ms
const int64_t activationNs = configAddedTimeNs + bucketSizeNs + (2 * 1000 * 1000); // 2 millis.
auto batterySaverOnEvent = CreateBatterySaverOnEvent(activationNs);
processor->OnLogEvent(batterySaverOnEvent.get()); // 15 mins + 2 ms.
EXPECT_TRUE(processor->mMetricsManagers.begin()->second->mAllMetricProducers[0]->isActive());
+ // This bucket should be kept. 1 total
processor->informPullAlarmFired(expectedPullTimeNs + 1); // 20 mins + 1 ns.
EXPECT_EQ(baseTimeNs + startBucketNum * bucketSizeNs + 3 * bucketSizeNs, expectedPullTimeNs);
+ // 25 mins + 2 ns.
+ // This bucket should be kept. 2 total
processor->informPullAlarmFired(expectedPullTimeNs + 2); // 25 mins + 2 ns.
EXPECT_EQ(baseTimeNs + startBucketNum * bucketSizeNs + 4 * bucketSizeNs, expectedPullTimeNs);
// Create random event to deactivate metric.
- auto deactivationEvent = CreateScreenBrightnessChangedEvent(activationNs + ttlNs + 1, 50);
+ // A pull occurs here and a partial bucket is created. The bucket ending here is kept. 3 total.
+ // 25 mins + 2 ms + 1 ns.
+ const int64_t deactivationNs = activationNs + ttlNs + 1;
+ auto deactivationEvent = CreateScreenBrightnessChangedEvent(deactivationNs, 50);
processor->OnLogEvent(deactivationEvent.get());
EXPECT_FALSE(processor->mMetricsManagers.begin()->second->mAllMetricProducers[0]->isActive());
+ // 30 mins + 3 ns. This bucket is skipped.
processor->informPullAlarmFired(expectedPullTimeNs + 3);
EXPECT_EQ(baseTimeNs + startBucketNum * bucketSizeNs + 5 * bucketSizeNs, expectedPullTimeNs);
+ // 35 mins + 4 ns. This bucket is skipped.
processor->informPullAlarmFired(expectedPullTimeNs + 4);
EXPECT_EQ(baseTimeNs + startBucketNum * bucketSizeNs + 6 * bucketSizeNs, expectedPullTimeNs);
- ConfigMetricsReportList reports;
- vector<uint8_t> buffer;
- processor->onDumpReport(cfgKey, configAddedTimeNs + 7 * bucketSizeNs + 10, false, true,
- ADB_DUMP, FAST, &buffer);
+ dumpReportTimeNs = configAddedTimeNs + 6 * bucketSizeNs + 10;
+ buffer.clear();
+ // 40 mins + 10 ns.
+ processor->onDumpReport(cfgKey, dumpReportTimeNs, false /* include_current_partial_bucket */,
+ true /* erase_data */, ADB_DUMP, FAST, &buffer);
EXPECT_TRUE(buffer.size() > 0);
EXPECT_TRUE(reports.ParseFromArray(&buffer[0], buffer.size()));
backfillDimensionPath(&reports);
@@ -484,7 +601,7 @@
backfillStartEndTimestamp(&reports);
ASSERT_EQ(1, reports.reports_size());
ASSERT_EQ(1, reports.reports(0).metrics_size());
- StatsLogReport::ValueMetricDataWrapper valueMetrics;
+ valueMetrics = StatsLogReport::ValueMetricDataWrapper();
sortMetricDataByDimensionsValue(reports.reports(0).metrics(0).value_metrics(), &valueMetrics);
ASSERT_GT((int)valueMetrics.data_size(), 0);
@@ -494,8 +611,8 @@
EXPECT_EQ(1 /* subsystem name field */,
data.dimensions_in_what().value_tuple().dimensions_value(0).field());
EXPECT_FALSE(data.dimensions_in_what().value_tuple().dimensions_value(0).value_str().empty());
- // We have 2 full buckets, the two surrounding the activation are dropped.
- ASSERT_EQ(2, data.bucket_info_size());
+ // We have 3 kept buckets; the two surrounding the activation are dropped.
+ ASSERT_EQ(3, data.bucket_info_size());
auto bucketInfo = data.bucket_info(0);
EXPECT_EQ(baseTimeNs + 3 * bucketSizeNs, bucketInfo.start_bucket_elapsed_nanos());
@@ -506,6 +623,25 @@
EXPECT_EQ(baseTimeNs + 4 * bucketSizeNs, bucketInfo.start_bucket_elapsed_nanos());
EXPECT_EQ(baseTimeNs + 5 * bucketSizeNs, bucketInfo.end_bucket_elapsed_nanos());
ASSERT_EQ(1, bucketInfo.values_size());
+
+ bucketInfo = data.bucket_info(2);
+ EXPECT_EQ(MillisToNano(NanoToMillis(baseTimeNs + 5 * bucketSizeNs)),
+ bucketInfo.start_bucket_elapsed_nanos());
+ EXPECT_EQ(MillisToNano(NanoToMillis(deactivationNs)), bucketInfo.end_bucket_elapsed_nanos());
+ ASSERT_EQ(1, bucketInfo.values_size());
+
+ // Check skipped bucket is not added after deactivation.
+ dumpReportTimeNs = configAddedTimeNs + 7 * bucketSizeNs + 10;
+ buffer.clear();
+ // 45 mins + 10 ns.
+ processor->onDumpReport(cfgKey, dumpReportTimeNs, true /* include_current_partial_bucket */,
+ true /* erase_data */, ADB_DUMP, FAST, &buffer);
+ EXPECT_TRUE(buffer.size() > 0);
+ EXPECT_TRUE(reports.ParseFromArray(&buffer[0], buffer.size()));
+ ASSERT_EQ(1, reports.reports_size());
+ ASSERT_EQ(1, reports.reports(0).metrics_size());
+ valueMetrics = reports.reports(0).metrics(0).value_metrics();
+ EXPECT_EQ(valueMetrics.skipped_size(), 0);
}
/**
diff --git a/statsd/tests/external/puller_util_test.cpp b/statsd/tests/external/puller_util_test.cpp
index 25c0c7e..71f4de4 100644
--- a/statsd/tests/external/puller_util_test.cpp
+++ b/statsd/tests/external/puller_util_test.cpp
@@ -22,7 +22,6 @@
#include "../metrics/metrics_test_helper.h"
#include "FieldValue.h"
-#include "annotations.h"
#include "stats_event.h"
#include "tests/statsd_test_util.h"
diff --git a/statsd/tests/guardrail/StatsdStats_test.cpp b/statsd/tests/guardrail/StatsdStats_test.cpp
index 81fcbe9..a2f3280 100644
--- a/statsd/tests/guardrail/StatsdStats_test.cpp
+++ b/statsd/tests/guardrail/StatsdStats_test.cpp
@@ -13,12 +13,15 @@
// limitations under the License.
#include "src/guardrail/StatsdStats.h"
-#include "statslog_statsdtest.h"
-#include "tests/statsd_test_util.h"
#include <gtest/gtest.h>
+
#include <vector>
+#include "src/metrics/parsing_utils/metrics_manager_util.h"
+#include "statslog_statsdtest.h"
+#include "tests/statsd_test_util.h"
+
#ifdef __ANDROID__
namespace android {
@@ -35,7 +38,7 @@
const int matchersCount = 30;
const int alertsCount = 10;
stats.noteConfigReceived(key, metricsCount, conditionsCount, matchersCount, alertsCount, {},
- true /*valid config*/);
+ nullopt /*valid config*/);
vector<uint8_t> output;
stats.dumpStats(&output, false /*reset stats*/);
@@ -51,6 +54,7 @@
EXPECT_EQ(matchersCount, configReport.matcher_count());
EXPECT_EQ(alertsCount, configReport.alert_count());
EXPECT_EQ(true, configReport.is_valid());
+ EXPECT_FALSE(configReport.has_invalid_config_reason());
EXPECT_FALSE(configReport.has_deletion_time_sec());
}
@@ -61,8 +65,19 @@
const int conditionsCount = 20;
const int matchersCount = 30;
const int alertsCount = 10;
+ optional<InvalidConfigReason> invalidConfigReason =
+ InvalidConfigReason(INVALID_CONFIG_REASON_UNKNOWN, 1);
+ invalidConfigReason->stateId = 2;
+ invalidConfigReason->alertId = 3;
+ invalidConfigReason->alarmId = 4;
+ invalidConfigReason->subscriptionId = 5;
+ invalidConfigReason->matcherIds.push_back(6);
+ invalidConfigReason->matcherIds.push_back(7);
+ invalidConfigReason->conditionIds.push_back(8);
+ invalidConfigReason->conditionIds.push_back(9);
+ invalidConfigReason->conditionIds.push_back(10);
stats.noteConfigReceived(key, metricsCount, conditionsCount, matchersCount, alertsCount, {},
- false /*bad config*/);
+ invalidConfigReason /*bad config*/);
vector<uint8_t> output;
stats.dumpStats(&output, false);
@@ -73,6 +88,96 @@
const auto& configReport = report.config_stats(0);
// The invalid config should be put into icebox with a deletion time.
EXPECT_TRUE(configReport.has_deletion_time_sec());
+ EXPECT_TRUE(configReport.has_invalid_config_reason());
+ EXPECT_EQ(configReport.invalid_config_reason().reason(), INVALID_CONFIG_REASON_UNKNOWN);
+ EXPECT_EQ(configReport.invalid_config_reason().metric_id(), 1);
+ EXPECT_EQ(configReport.invalid_config_reason().state_id(), 2);
+ EXPECT_EQ(configReport.invalid_config_reason().alert_id(), 3);
+ EXPECT_EQ(configReport.invalid_config_reason().alarm_id(), 4);
+ EXPECT_EQ(configReport.invalid_config_reason().subscription_id(), 5);
+ EXPECT_EQ(configReport.invalid_config_reason().matcher_id_size(), 2);
+ EXPECT_EQ(configReport.invalid_config_reason().matcher_id(0), 6);
+ EXPECT_EQ(configReport.invalid_config_reason().matcher_id(1), 7);
+ EXPECT_EQ(configReport.invalid_config_reason().condition_id_size(), 3);
+ EXPECT_EQ(configReport.invalid_config_reason().condition_id(0), 8);
+ EXPECT_EQ(configReport.invalid_config_reason().condition_id(1), 9);
+ EXPECT_EQ(configReport.invalid_config_reason().condition_id(2), 10);
+}
+
+TEST(StatsdStatsTest, TestInvalidConfigMissingMetricId) {
+ StatsdStats stats;
+ ConfigKey key(0, 12345);
+ const int metricsCount = 10;
+ const int conditionsCount = 20;
+ const int matchersCount = 30;
+ const int alertsCount = 10;
+ optional<InvalidConfigReason> invalidConfigReason =
+ InvalidConfigReason(INVALID_CONFIG_REASON_SUBSCRIPTION_SUBSCRIBER_INFO_MISSING);
+ invalidConfigReason->stateId = 1;
+ invalidConfigReason->alertId = 2;
+ invalidConfigReason->alarmId = 3;
+ invalidConfigReason->subscriptionId = 4;
+ invalidConfigReason->matcherIds.push_back(5);
+ invalidConfigReason->conditionIds.push_back(6);
+ invalidConfigReason->conditionIds.push_back(7);
+ stats.noteConfigReceived(key, metricsCount, conditionsCount, matchersCount, alertsCount, {},
+ invalidConfigReason /*bad config*/);
+ vector<uint8_t> output;
+ stats.dumpStats(&output, false);
+
+ StatsdStatsReport report;
+ bool good = report.ParseFromArray(&output[0], output.size());
+ EXPECT_TRUE(good);
+ ASSERT_EQ(1, report.config_stats_size());
+ const auto& configReport = report.config_stats(0);
+ // The invalid config should be put into icebox with a deletion time.
+ EXPECT_TRUE(configReport.has_deletion_time_sec());
+ EXPECT_TRUE(configReport.has_invalid_config_reason());
+ EXPECT_EQ(configReport.invalid_config_reason().reason(),
+ INVALID_CONFIG_REASON_SUBSCRIPTION_SUBSCRIBER_INFO_MISSING);
+ EXPECT_FALSE(configReport.invalid_config_reason().has_metric_id());
+ EXPECT_EQ(configReport.invalid_config_reason().state_id(), 1);
+ EXPECT_EQ(configReport.invalid_config_reason().alert_id(), 2);
+ EXPECT_EQ(configReport.invalid_config_reason().alarm_id(), 3);
+ EXPECT_EQ(configReport.invalid_config_reason().subscription_id(), 4);
+ EXPECT_EQ(configReport.invalid_config_reason().matcher_id_size(), 1);
+ EXPECT_EQ(configReport.invalid_config_reason().matcher_id(0), 5);
+ EXPECT_EQ(configReport.invalid_config_reason().condition_id_size(), 2);
+ EXPECT_EQ(configReport.invalid_config_reason().condition_id(0), 6);
+ EXPECT_EQ(configReport.invalid_config_reason().condition_id(1), 7);
+}
+
+TEST(StatsdStatsTest, TestInvalidConfigOnlyMetricId) {
+ StatsdStats stats;
+ ConfigKey key(0, 12345);
+ const int metricsCount = 10;
+ const int conditionsCount = 20;
+ const int matchersCount = 30;
+ const int alertsCount = 10;
+ optional<InvalidConfigReason> invalidConfigReason =
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_NOT_IN_PREV_CONFIG, 1);
+ stats.noteConfigReceived(key, metricsCount, conditionsCount, matchersCount, alertsCount, {},
+ invalidConfigReason /*bad config*/);
+ vector<uint8_t> output;
+ stats.dumpStats(&output, false);
+
+ StatsdStatsReport report;
+ bool good = report.ParseFromArray(&output[0], output.size());
+ EXPECT_TRUE(good);
+ ASSERT_EQ(1, report.config_stats_size());
+ const auto& configReport = report.config_stats(0);
+ // The invalid config should be put into icebox with a deletion time.
+ EXPECT_TRUE(configReport.has_deletion_time_sec());
+ EXPECT_TRUE(configReport.has_invalid_config_reason());
+ EXPECT_EQ(configReport.invalid_config_reason().reason(),
+ INVALID_CONFIG_REASON_METRIC_NOT_IN_PREV_CONFIG);
+ EXPECT_EQ(configReport.invalid_config_reason().metric_id(), 1);
+ EXPECT_FALSE(configReport.invalid_config_reason().has_state_id());
+ EXPECT_FALSE(configReport.invalid_config_reason().has_alert_id());
+ EXPECT_FALSE(configReport.invalid_config_reason().has_alarm_id());
+ EXPECT_FALSE(configReport.invalid_config_reason().has_subscription_id());
+ EXPECT_EQ(configReport.invalid_config_reason().matcher_id_size(), 0);
+ EXPECT_EQ(configReport.invalid_config_reason().condition_id_size(), 0);
}
TEST(StatsdStatsTest, TestConfigRemove) {
@@ -83,7 +188,7 @@
const int matchersCount = 30;
const int alertsCount = 10;
stats.noteConfigReceived(key, metricsCount, conditionsCount, matchersCount, alertsCount, {},
- true);
+ nullopt);
vector<uint8_t> output;
stats.dumpStats(&output, false);
StatsdStatsReport report;
@@ -105,7 +210,7 @@
TEST(StatsdStatsTest, TestSubStats) {
StatsdStats stats;
ConfigKey key(0, 12345);
- stats.noteConfigReceived(key, 2, 3, 4, 5, {std::make_pair(123, 456)}, true);
+ stats.noteConfigReceived(key, 2, 3, 4, 5, {std::make_pair(123, 456)}, nullopt);
stats.noteMatcherMatched(key, StringToId("matcher1"));
stats.noteMatcherMatched(key, StringToId("matcher1"));
@@ -222,11 +327,11 @@
StatsdStats stats;
time_t now = time(nullptr);
// old event, we get it from the stats buffer. should be ignored.
- stats.noteAtomLogged(util::SENSOR_STATE_CHANGED, 1000);
+ stats.noteAtomLogged(util::SENSOR_STATE_CHANGED, 1000, false);
- stats.noteAtomLogged(util::SENSOR_STATE_CHANGED, now + 1);
- stats.noteAtomLogged(util::SENSOR_STATE_CHANGED, now + 2);
- stats.noteAtomLogged(util::APP_CRASH_OCCURRED, now + 3);
+ stats.noteAtomLogged(util::SENSOR_STATE_CHANGED, now + 1, false);
+ stats.noteAtomLogged(util::SENSOR_STATE_CHANGED, now + 2, false);
+ stats.noteAtomLogged(util::APP_CRASH_OCCURRED, now + 3, false);
vector<uint8_t> output;
stats.dumpStats(&output, false);
@@ -245,6 +350,8 @@
if (atomStats.tag() == util::APP_CRASH_OCCURRED && atomStats.count() == 1) {
dropboxAtomGood = true;
}
+ EXPECT_FALSE(atomStats.has_dropped_count());
+ EXPECT_FALSE(atomStats.has_skip_count());
}
EXPECT_TRUE(dropboxAtomGood);
@@ -257,9 +364,9 @@
int newAtom1 = StatsdStats::kMaxPushedAtomId + 1;
int newAtom2 = StatsdStats::kMaxPushedAtomId + 2;
- stats.noteAtomLogged(newAtom1, now + 1);
- stats.noteAtomLogged(newAtom1, now + 2);
- stats.noteAtomLogged(newAtom2, now + 3);
+ stats.noteAtomLogged(newAtom1, now + 1, false);
+ stats.noteAtomLogged(newAtom1, now + 2, false);
+ stats.noteAtomLogged(newAtom2, now + 3, false);
vector<uint8_t> output;
stats.dumpStats(&output, false);
@@ -278,6 +385,8 @@
if (atomStats.tag() == newAtom2 && atomStats.count() == 1) {
newAtom2Good = true;
}
+ EXPECT_FALSE(atomStats.has_dropped_count());
+ EXPECT_FALSE(atomStats.has_skip_count());
}
EXPECT_TRUE(newAtom1Good);
@@ -372,6 +481,96 @@
EXPECT_EQ(1L, atomStats2.max_bucket_boundary_delay_ns());
}
+TEST(StatsdStatsTest, TestRestrictedMetricsStats) {
+ StatsdStats stats;
+ const int64_t metricId = -1234556L;
+ ConfigKey key(0, 12345);
+ stats.noteConfigReceived(key, 2, 3, 4, 5, {}, nullopt);
+ stats.noteRestrictedMetricInsertError(key, metricId);
+ stats.noteRestrictedMetricTableCreationError(key, metricId);
+ stats.noteRestrictedMetricTableDeletionError(key, metricId);
+ stats.noteDeviceInfoTableCreationFailed(key);
+ stats.noteRestrictedMetricFlushLatency(key, metricId, 3000);
+ stats.noteRestrictedMetricFlushLatency(key, metricId, 3001);
+ stats.noteRestrictedMetricCategoryChanged(key, metricId);
+ stats.noteRestrictedConfigFlushLatency(key, 4000);
+ ConfigKey configKeyWithoutError(0, 666);
+ stats.noteConfigReceived(configKeyWithoutError, 2, 3, 4, 5, {}, nullopt);
+ stats.noteDbCorrupted(key);
+ stats.noteDbCorrupted(key);
+ stats.noteRestrictedConfigDbSize(key, 999, 111);
+
+ vector<uint8_t> output;
+ stats.dumpStats(&output, false);
+ StatsdStatsReport report;
+ bool good = report.ParseFromArray(&output[0], output.size());
+ EXPECT_TRUE(good);
+
+ ASSERT_EQ(2, report.config_stats().size());
+ ASSERT_EQ(0, report.config_stats(0).restricted_metric_stats().size());
+ ASSERT_EQ(1, report.config_stats(1).restricted_metric_stats().size());
+ EXPECT_EQ(1, report.config_stats(1).restricted_metric_stats(0).insert_error());
+ EXPECT_EQ(1, report.config_stats(1).restricted_metric_stats(0).table_creation_error());
+ EXPECT_EQ(1, report.config_stats(1).restricted_metric_stats(0).table_deletion_error());
+ EXPECT_EQ(1, report.config_stats(1).restricted_metric_stats(0).category_changed_count());
+ ASSERT_EQ(2, report.config_stats(1).restricted_metric_stats(0).flush_latency_ns().size());
+ EXPECT_EQ(3000, report.config_stats(1).restricted_metric_stats(0).flush_latency_ns(0));
+ EXPECT_EQ(3001, report.config_stats(1).restricted_metric_stats(0).flush_latency_ns(1));
+ ASSERT_EQ(1, report.config_stats(1).restricted_db_size_time_sec().size());
+ EXPECT_EQ(999, report.config_stats(1).restricted_db_size_time_sec(0));
+ ASSERT_EQ(1, report.config_stats(1).restricted_db_size_bytes().size());
+ EXPECT_EQ(111, report.config_stats(1).restricted_db_size_bytes(0));
+ ASSERT_EQ(1, report.config_stats(1).restricted_flush_latency().size());
+ EXPECT_EQ(4000, report.config_stats(1).restricted_flush_latency(0));
+ EXPECT_TRUE(report.config_stats(1).device_info_table_creation_failed());
+ EXPECT_EQ(metricId, report.config_stats(1).restricted_metric_stats(0).restricted_metric_id());
+ EXPECT_EQ(2, report.config_stats(1).restricted_db_corrupted_count());
+}
+
+TEST(StatsdStatsTest, TestRestrictedMetricsQueryStats) {
+ StatsdStats stats;
+ const int32_t callingUid = 100;
+ ConfigKey configKey(0, 12345);
+ const string configPackage = "com.google.android.gm";
+ int64_t beforeNoteMetricSucceed = getWallClockNs();
+ stats.noteQueryRestrictedMetricSucceed(configKey.GetId(), configPackage, configKey.GetUid(),
+ callingUid, /*queryLatencyNs=*/5 * NS_PER_SEC);
+ int64_t afterNoteMetricSucceed = getWallClockNs();
+
+ const int64_t configIdWithError = 111;
+ stats.noteQueryRestrictedMetricFailed(configIdWithError, configPackage, std::nullopt,
+ callingUid, InvalidQueryReason(AMBIGUOUS_CONFIG_KEY));
+ stats.noteQueryRestrictedMetricFailed(configIdWithError, configPackage, std::nullopt,
+ callingUid, InvalidQueryReason(AMBIGUOUS_CONFIG_KEY),
+ "error_message");
+
+ vector<uint8_t> output;
+ stats.dumpStats(&output, false);
+ StatsdStatsReport report;
+ bool good = report.ParseFromArray(&output[0], output.size());
+ EXPECT_TRUE(good);
+
+ ASSERT_EQ(3, report.restricted_metric_query_stats().size());
+ EXPECT_EQ(configKey.GetId(), report.restricted_metric_query_stats(0).config_id());
+ EXPECT_EQ(configKey.GetUid(), report.restricted_metric_query_stats(0).config_uid());
+ EXPECT_EQ(callingUid, report.restricted_metric_query_stats(0).calling_uid());
+ EXPECT_EQ(configPackage, report.restricted_metric_query_stats(0).config_package());
+ EXPECT_FALSE(report.restricted_metric_query_stats(0).has_query_error());
+ EXPECT_LT(beforeNoteMetricSucceed,
+ report.restricted_metric_query_stats(0).query_wall_time_ns());
+ EXPECT_GT(afterNoteMetricSucceed, report.restricted_metric_query_stats(0).query_wall_time_ns());
+ EXPECT_EQ(5 * NS_PER_SEC, report.restricted_metric_query_stats(0).query_latency_ns());
+ EXPECT_EQ(configIdWithError, report.restricted_metric_query_stats(1).config_id());
+ EXPECT_EQ(AMBIGUOUS_CONFIG_KEY, report.restricted_metric_query_stats(1).invalid_query_reason());
+ EXPECT_EQ(false, report.restricted_metric_query_stats(1).has_config_uid());
+ EXPECT_FALSE(report.restricted_metric_query_stats(1).has_query_error());
+ EXPECT_FALSE(report.restricted_metric_query_stats(1).has_query_latency_ns());
+ EXPECT_EQ("error_message", report.restricted_metric_query_stats(2).query_error());
+ EXPECT_FALSE(report.restricted_metric_query_stats(2).has_query_latency_ns());
+ EXPECT_NE(report.restricted_metric_query_stats(1).query_wall_time_ns(),
+ report.restricted_metric_query_stats(0).query_wall_time_ns());
+}
+
TEST(StatsdStatsTest, TestAnomalyMonitor) {
StatsdStats stats;
stats.noteRegisteredAnomalyAlarmChanged();
@@ -393,7 +592,7 @@
timestamps.push_back(i);
}
ConfigKey key(0, 12345);
- stats.noteConfigReceived(key, 2, 3, 4, 5, {}, true);
+ stats.noteConfigReceived(key, 2, 3, 4, 5, {}, nullopt);
for (int i = 0; i < StatsdStats::kMaxTimestampCount; i++) {
stats.noteDataDropped(key, timestamps[i]);
@@ -512,7 +711,7 @@
// We must call noteAtomLogged as well because only those pushed atoms
// that have been logged will have stats printed about them in the
// proto.
- stats.noteAtomLogged(pushAtomTag, /*timeSec=*/0);
+ stats.noteAtomLogged(pushAtomTag, /*timeSec=*/0, false);
stats.noteAtomError(pushAtomTag, /*pull=*/false);
stats.noteAtomError(pullAtomTag, /*pull=*/true);
@@ -528,6 +727,8 @@
const auto& pushedAtomStats = report.atom_stats(0);
EXPECT_EQ(pushAtomTag, pushedAtomStats.tag());
EXPECT_EQ(numErrors, pushedAtomStats.error_count());
+ EXPECT_FALSE(pushedAtomStats.has_dropped_count());
+ EXPECT_FALSE(pushedAtomStats.has_skip_count());
// Check error count = numErrors for pull atom
ASSERT_EQ(1, report.pulled_atom_stats_size());
@@ -536,6 +737,169 @@
EXPECT_EQ(numErrors, pulledAtomStats.atom_error_count());
}
+TEST(StatsdStatsTest, TestAtomDroppedStats) {
+ StatsdStats stats;
+
+ const int pushAtomTag = 100;
+ const int nonPlatformPushAtomTag = StatsdStats::kMaxPushedAtomId + 100;
+
+ const int numDropped = 10;
+ for (int i = 0; i < numDropped; i++) {
+ stats.noteEventQueueOverflow(/*oldestEventTimestampNs*/ 0, pushAtomTag, false);
+ stats.noteEventQueueOverflow(/*oldestEventTimestampNs*/ 0, nonPlatformPushAtomTag, false);
+ }
+
+ vector<uint8_t> output;
+ stats.dumpStats(&output, true);
+ ASSERT_EQ(0, stats.mPushedAtomDropsStats.size());
+
+ StatsdStatsReport report;
+ EXPECT_TRUE(report.ParseFromArray(&output[0], output.size()));
+
+ // Check dropped_count = numDropped for push atoms
+ ASSERT_EQ(2, report.atom_stats_size());
+
+ const auto& pushedAtomStats = report.atom_stats(0);
+ EXPECT_EQ(pushAtomTag, pushedAtomStats.tag());
+ EXPECT_EQ(numDropped, pushedAtomStats.count());
+ EXPECT_EQ(numDropped, pushedAtomStats.dropped_count());
+ EXPECT_FALSE(pushedAtomStats.has_error_count());
+ EXPECT_FALSE(pushedAtomStats.has_skip_count());
+
+ const auto& nonPlatformPushedAtomStats = report.atom_stats(1);
+ EXPECT_EQ(nonPlatformPushAtomTag, nonPlatformPushedAtomStats.tag());
+ EXPECT_EQ(numDropped, nonPlatformPushedAtomStats.count());
+ EXPECT_EQ(numDropped, nonPlatformPushedAtomStats.dropped_count());
+ EXPECT_FALSE(nonPlatformPushedAtomStats.has_error_count());
+ EXPECT_FALSE(nonPlatformPushedAtomStats.has_skip_count());
+}
+
+TEST(StatsdStatsTest, TestAtomLoggedAndDroppedStats) {
+ StatsdStats stats;
+
+ const int pushAtomTag = 100;
+ const int nonPlatformPushAtomTag = StatsdStats::kMaxPushedAtomId + 100;
+
+ const int numLogged = 10;
+ for (int i = 0; i < numLogged; i++) {
+ stats.noteAtomLogged(pushAtomTag, /*timeSec*/ 0, false);
+ stats.noteAtomLogged(nonPlatformPushAtomTag, /*timeSec*/ 0, false);
+ }
+
+ const int numDropped = 10;
+ for (int i = 0; i < numDropped; i++) {
+ stats.noteEventQueueOverflow(/*oldestEventTimestampNs*/ 0, pushAtomTag, false);
+ stats.noteEventQueueOverflow(/*oldestEventTimestampNs*/ 0, nonPlatformPushAtomTag, false);
+ }
+
+ vector<uint8_t> output;
+ stats.dumpStats(&output, false);
+ StatsdStatsReport report;
+ EXPECT_TRUE(report.ParseFromArray(&output[0], output.size()));
+
+ // Check dropped_count = numDropped for push atoms
+ ASSERT_EQ(2, report.atom_stats_size());
+
+ const auto& pushedAtomStats = report.atom_stats(0);
+ EXPECT_EQ(pushAtomTag, pushedAtomStats.tag());
+ EXPECT_EQ(numLogged + numDropped, pushedAtomStats.count());
+ EXPECT_EQ(numDropped, pushedAtomStats.dropped_count());
+ EXPECT_FALSE(pushedAtomStats.has_error_count());
+ EXPECT_FALSE(pushedAtomStats.has_skip_count());
+
+ const auto& nonPlatformPushedAtomStats = report.atom_stats(1);
+ EXPECT_EQ(nonPlatformPushAtomTag, nonPlatformPushedAtomStats.tag());
+ EXPECT_EQ(numLogged + numDropped, nonPlatformPushedAtomStats.count());
+ EXPECT_EQ(numDropped, nonPlatformPushedAtomStats.dropped_count());
+ EXPECT_FALSE(nonPlatformPushedAtomStats.has_error_count());
+ EXPECT_FALSE(nonPlatformPushedAtomStats.has_skip_count());
+}
+
+TEST(StatsdStatsTest, TestAtomSkippedStats) {
+ StatsdStats stats;
+
+ const int pushAtomTag = 100;
+ const int nonPlatformPushAtomTag = StatsdStats::kMaxPushedAtomId + 100;
+ const int numSkipped = 10;
+
+ for (int i = 0; i < numSkipped; i++) {
+ stats.noteAtomLogged(pushAtomTag, /*timeSec=*/0, /*isSkipped*/ true);
+ stats.noteAtomLogged(nonPlatformPushAtomTag, /*timeSec=*/0, /*isSkipped*/ true);
+ }
+
+ vector<uint8_t> output;
+ stats.dumpStats(&output, false);
+ StatsdStatsReport report;
+ EXPECT_TRUE(report.ParseFromArray(&output[0], output.size()));
+
+ // Check skip_count = numSkipped for push atoms
+ ASSERT_EQ(2, report.atom_stats_size());
+
+ const auto& pushedAtomStats = report.atom_stats(0);
+ EXPECT_EQ(pushAtomTag, pushedAtomStats.tag());
+ EXPECT_EQ(numSkipped, pushedAtomStats.count());
+ EXPECT_EQ(numSkipped, pushedAtomStats.skip_count());
+ EXPECT_FALSE(pushedAtomStats.has_error_count());
+
+ const auto& nonPlatformPushedAtomStats = report.atom_stats(1);
+ EXPECT_EQ(nonPlatformPushAtomTag, nonPlatformPushedAtomStats.tag());
+ EXPECT_EQ(numSkipped, nonPlatformPushedAtomStats.count());
+ EXPECT_EQ(numSkipped, nonPlatformPushedAtomStats.skip_count());
+ EXPECT_FALSE(nonPlatformPushedAtomStats.has_error_count());
+}
+
+TEST(StatsdStatsTest, TestAtomLoggedAndDroppedAndSkippedStats) {
+ StatsdStats stats;
+
+ const int pushAtomTag = 100;
+ const int nonPlatformPushAtomTag = StatsdStats::kMaxPushedAtomId + 100;
+
+ const int numLogged = 10;
+ for (int i = 0; i < numLogged; i++) {
+ stats.noteAtomLogged(pushAtomTag, /*timeSec*/ 0, false);
+ stats.noteAtomLogged(nonPlatformPushAtomTag, /*timeSec*/ 0, false);
+ }
+
+ const int numDropped = 10;
+ for (int i = 0; i < numDropped; i++) {
+ stats.noteEventQueueOverflow(/*oldestEventTimestampNs*/ 0, pushAtomTag, true);
+ stats.noteEventQueueOverflow(/*oldestEventTimestampNs*/ 0, nonPlatformPushAtomTag, true);
+ }
+
+ vector<uint8_t> output;
+ stats.dumpStats(&output, false);
+ StatsdStatsReport report;
+ EXPECT_TRUE(report.ParseFromArray(&output[0], output.size()));
+
+ // Check dropped_count = numDropped for push atoms
+ ASSERT_EQ(2, report.atom_stats_size());
+
+ const auto& pushedAtomStats = report.atom_stats(0);
+ EXPECT_EQ(pushAtomTag, pushedAtomStats.tag());
+ EXPECT_EQ(numLogged + numDropped, pushedAtomStats.count());
+ EXPECT_EQ(numDropped, pushedAtomStats.dropped_count());
+ EXPECT_EQ(numDropped, pushedAtomStats.skip_count());
+ EXPECT_FALSE(pushedAtomStats.has_error_count());
+
+ const auto& nonPlatformPushedAtomStats = report.atom_stats(1);
+ EXPECT_EQ(nonPlatformPushAtomTag, nonPlatformPushedAtomStats.tag());
+ EXPECT_EQ(numLogged + numDropped, nonPlatformPushedAtomStats.count());
+ EXPECT_EQ(numDropped, nonPlatformPushedAtomStats.dropped_count());
+ EXPECT_EQ(numDropped, nonPlatformPushedAtomStats.skip_count());
+ EXPECT_FALSE(nonPlatformPushedAtomStats.has_error_count());
+}
+
+TEST(StatsdStatsTest, TestShardOffsetProvider) {
+ StatsdStats stats;
+ ShardOffsetProvider::getInstance().setShardOffset(15);
+ vector<uint8_t> output;
+ stats.dumpStats(&output, false);
+
+ StatsdStatsReport report;
+ EXPECT_TRUE(report.ParseFromArray(&output[0], output.size()));
+ EXPECT_EQ(report.shard_offset(), 15);
+}
+
} // namespace statsd
} // namespace os
} // namespace android
diff --git a/statsd/tests/metrics/CountMetricProducer_test.cpp b/statsd/tests/metrics/CountMetricProducer_test.cpp
index b42e465..32c48b0 100644
--- a/statsd/tests/metrics/CountMetricProducer_test.cpp
+++ b/statsd/tests/metrics/CountMetricProducer_test.cpp
@@ -160,8 +160,10 @@
CountMetricProducer countProducer(kConfigKey, metric, 0, {ConditionState::kUnknown}, wizard,
protoHash, bucketStartTimeNs, bucketStartTimeNs);
+ assertConditionTimer(countProducer.mConditionTimer, false, 0, 0);
countProducer.onConditionChanged(true, bucketStartTimeNs);
+ assertConditionTimer(countProducer.mConditionTimer, true, 0, bucketStartTimeNs);
LogEvent event1(/*uid=*/0, /*pid=*/0);
makeLogEvent(&event1, bucketStartTimeNs + 1, /*atomId=*/1);
@@ -170,6 +172,7 @@
ASSERT_EQ(0UL, countProducer.mPastBuckets.size());
countProducer.onConditionChanged(false /*new condition*/, bucketStartTimeNs + 2);
+ assertConditionTimer(countProducer.mConditionTimer, false, 2, bucketStartTimeNs + 2);
// Upon this match event, the matched event1 is flushed.
LogEvent event2(/*uid=*/0, /*pid=*/0);
diff --git a/statsd/tests/metrics/DurationMetricProducer_test.cpp b/statsd/tests/metrics/DurationMetricProducer_test.cpp
index ea877e2..995ed3f 100644
--- a/statsd/tests/metrics/DurationMetricProducer_test.cpp
+++ b/statsd/tests/metrics/DurationMetricProducer_test.cpp
@@ -150,6 +150,7 @@
bucketStartTimeNs, bucketStartTimeNs);
durationProducer.mCondition = ConditionState::kFalse;
+ assertConditionTimer(durationProducer.mConditionTimer, false, 0, 0);
EXPECT_FALSE(durationProducer.mCondition);
EXPECT_FALSE(durationProducer.isConditionSliced());
@@ -158,18 +159,24 @@
durationProducer.flushIfNeededLocked(bucketStartTimeNs + bucketSizeNs + 1);
ASSERT_EQ(0UL, durationProducer.mPastBuckets.size());
+ int64_t conditionStartTimeNs = bucketStartTimeNs + bucketSizeNs + 2;
+ int64_t bucket2EndTimeNs = bucketStartTimeNs + 2 * bucketSizeNs;
durationProducer.onMatchedLogEvent(1 /* start index*/, event3);
- durationProducer.onConditionChanged(true /* condition */, bucketStartTimeNs + bucketSizeNs + 2);
+ durationProducer.onConditionChanged(true /* condition */, conditionStartTimeNs);
+ assertConditionTimer(durationProducer.mConditionTimer, true, 0, conditionStartTimeNs);
durationProducer.onMatchedLogEvent(2 /* stop index*/, event4);
- durationProducer.flushIfNeededLocked(bucketStartTimeNs + 2 * bucketSizeNs + 1);
+ durationProducer.flushIfNeededLocked(bucket2EndTimeNs + 1);
+ assertConditionTimer(durationProducer.mConditionTimer, true, 0, bucket2EndTimeNs,
+ /*currentBucketStartDelayNs=*/1);
ASSERT_EQ(1UL, durationProducer.mPastBuckets.size());
EXPECT_TRUE(durationProducer.mPastBuckets.find(DEFAULT_METRIC_DIMENSION_KEY) !=
durationProducer.mPastBuckets.end());
const auto& buckets2 = durationProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY];
ASSERT_EQ(1UL, buckets2.size());
EXPECT_EQ(bucketStartTimeNs + bucketSizeNs, buckets2[0].mBucketStartNs);
- EXPECT_EQ(bucketStartTimeNs + 2 * bucketSizeNs, buckets2[0].mBucketEndNs);
+ EXPECT_EQ(bucket2EndTimeNs, buckets2[0].mBucketEndNs);
EXPECT_EQ(1LL, buckets2[0].mDuration);
+ EXPECT_EQ(bucket2EndTimeNs - conditionStartTimeNs, buckets2[0].mConditionTrueNs);
}
TEST(DurationMetricTrackerTest, TestNonSlicedConditionUnknownState) {
diff --git a/statsd/tests/metrics/EventMetricProducer_test.cpp b/statsd/tests/metrics/EventMetricProducer_test.cpp
index 192848e..d5fe932 100644
--- a/statsd/tests/metrics/EventMetricProducer_test.cpp
+++ b/statsd/tests/metrics/EventMetricProducer_test.cpp
@@ -41,11 +41,15 @@
const ConfigKey kConfigKey(0, 12345);
const uint64_t protoHash = 0x1234567890;
-void makeLogEvent(LogEvent* logEvent, int32_t atomId, int64_t timestampNs, string str) {
+void makeLogEvent(LogEvent* logEvent, int32_t atomId, int64_t timestampNs, string str,
+ vector<uint8_t>* bytesField = nullptr) {
AStatsEvent* statsEvent = AStatsEvent_obtain();
AStatsEvent_setAtomId(statsEvent, atomId);
AStatsEvent_overwriteTimestamp(statsEvent, timestampNs);
AStatsEvent_writeString(statsEvent, str.c_str());
+ if (bytesField != nullptr) {
+ AStatsEvent_writeByteArray(statsEvent, bytesField->data(), bytesField->size());
+ }
parseStatsEventToLogEvent(statsEvent, logEvent);
}
@@ -241,6 +245,58 @@
}
}
+TEST_F(EventMetricProducerTest, TestBytesFieldAggregatedEvents) {
+ int64_t bucketStartTimeNs = 10000000000;
+ int tagId = 1;
+
+ EventMetric metric;
+ metric.set_id(1);
+
+ vector<uint8_t> bytesField1{10, 20, 30};
+ vector<uint8_t> bytesField2{10, 20, 30, 40};
+ LogEvent event1(/*uid=*/0, /*pid=*/0);
+ makeLogEvent(&event1, tagId, bucketStartTimeNs + 10, "111", &bytesField1);
+ LogEvent event2(/*uid=*/0, /*pid=*/0);
+ makeLogEvent(&event2, tagId, bucketStartTimeNs + 20, "111", &bytesField1);
+ LogEvent event3(/*uid=*/0, /*pid=*/0);
+ makeLogEvent(&event3, tagId, bucketStartTimeNs + 30, "111", &bytesField1);
+
+ LogEvent event4(/*uid=*/0, /*pid=*/0);
+ makeLogEvent(&event4, tagId, bucketStartTimeNs + 40, "111", &bytesField2);
+
+ sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+ EventMetricProducer eventProducer(kConfigKey, metric, -1 /*-1 meaning no condition*/, {},
+ wizard, protoHash, bucketStartTimeNs);
+
+ eventProducer.onMatchedLogEvent(1 /*matcher index*/, event1);
+ eventProducer.onMatchedLogEvent(1 /*matcher index*/, event2);
+ eventProducer.onMatchedLogEvent(1 /*matcher index*/, event3);
+ eventProducer.onMatchedLogEvent(1 /*matcher index*/, event4);
+
+ // Check dump report content.
+ ProtoOutputStream output;
+ std::set<string> strSet;
+ eventProducer.onDumpReport(bucketStartTimeNs + 50, true /*include current partial bucket*/,
+ true /*erase data*/, FAST, &strSet, &output);
+
+ StatsLogReport report = outputStreamToProto(&output);
+ EXPECT_TRUE(report.has_event_metrics());
+ ASSERT_EQ(2, report.event_metrics().data_size());
+
+ for (EventMetricData metricData : report.event_metrics().data()) {
+ AggregatedAtomInfo atomInfo = metricData.aggregated_atom_info();
+ if (atomInfo.elapsed_timestamp_nanos_size() == 1) {
+ EXPECT_EQ(atomInfo.elapsed_timestamp_nanos(0), bucketStartTimeNs + 40);
+ } else if (atomInfo.elapsed_timestamp_nanos_size() == 3) {
+ EXPECT_EQ(atomInfo.elapsed_timestamp_nanos(0), bucketStartTimeNs + 10);
+ EXPECT_EQ(atomInfo.elapsed_timestamp_nanos(1), bucketStartTimeNs + 20);
+ EXPECT_EQ(atomInfo.elapsed_timestamp_nanos(2), bucketStartTimeNs + 30);
+ } else {
+ FAIL();
+ }
+ }
+}
+
TEST_F(EventMetricProducerTest, TestTwoAtomTagAggregatedEvents) {
int64_t bucketStartTimeNs = 10000000000;
int tagId = 1;
diff --git a/statsd/tests/metrics/GaugeMetricProducer_test.cpp b/statsd/tests/metrics/GaugeMetricProducer_test.cpp
index c64fb7b..9f010a4 100644
--- a/statsd/tests/metrics/GaugeMetricProducer_test.cpp
+++ b/statsd/tests/metrics/GaugeMetricProducer_test.cpp
@@ -150,7 +150,7 @@
allData.clear();
allData.push_back(makeLogEvent(tagId, bucket2StartTimeNs + 1, 10, "some value", 11));
- gaugeProducer.onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ gaugeProducer.onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
ASSERT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
auto it = gaugeProducer.mCurrentSlicedBucket->begin()->second.front().mFields->begin();
EXPECT_EQ(INT, it->mValue.getType());
@@ -168,7 +168,7 @@
allData.clear();
allData.push_back(makeLogEvent(tagId, bucket3StartTimeNs + 10, 24, "some value", 25));
- gaugeProducer.onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+ gaugeProducer.onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
ASSERT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
it = gaugeProducer.mCurrentSlicedBucket->begin()->second.front().mFields->begin();
EXPECT_EQ(INT, it->mValue.getType());
@@ -331,7 +331,7 @@
vector<shared_ptr<LogEvent>> allData;
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucketStartTimeNs + 1, 1));
- gaugeProducer.onDataPulled(allData, /** succeed */ true, bucketStartTimeNs);
+ gaugeProducer.onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucketStartTimeNs);
ASSERT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
EXPECT_EQ(1, gaugeProducer.mCurrentSlicedBucket->begin()
->second.front()
@@ -361,7 +361,8 @@
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucketStartTimeNs + bucketSizeNs + 1, 3));
- gaugeProducer.onDataPulled(allData, /** succeed */ true, bucketStartTimeNs + bucketSizeNs);
+ gaugeProducer.onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS,
+ bucketStartTimeNs + bucketSizeNs);
ASSERT_EQ(2UL, gaugeProducer.mPastBuckets[DEFAULT_METRIC_DIMENSION_KEY].size());
ASSERT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
EXPECT_EQ(3, gaugeProducer.mCurrentSlicedBucket->begin()
@@ -399,7 +400,7 @@
vector<shared_ptr<LogEvent>> allData;
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucketStartTimeNs + 1, 1));
- gaugeProducer.onDataPulled(allData, /** succeed */ true, bucketStartTimeNs);
+ gaugeProducer.onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucketStartTimeNs);
ASSERT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
EXPECT_EQ(1, gaugeProducer.mCurrentSlicedBucket->begin()
->second.front()
@@ -462,7 +463,7 @@
vector<shared_ptr<LogEvent>> allData;
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 110));
- gaugeProducer.onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ gaugeProducer.onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
ASSERT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
EXPECT_EQ(110, gaugeProducer.mCurrentSlicedBucket->begin()
@@ -551,7 +552,7 @@
vector<shared_ptr<LogEvent>> allData;
allData.clear();
allData.push_back(CreateTwoValueLogEvent(tagId, bucket2StartTimeNs + 1, 1000, 110));
- gaugeProducer.onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ gaugeProducer.onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
ASSERT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
ASSERT_EQ(1UL, gaugeProducer.mPastBuckets.size());
@@ -598,7 +599,7 @@
vector<shared_ptr<LogEvent>> allData;
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucketStartTimeNs + 1, 13));
- gaugeProducer.onDataPulled(allData, /** succeed */ true, bucketStartTimeNs);
+ gaugeProducer.onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucketStartTimeNs);
ASSERT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
EXPECT_EQ(13L, gaugeProducer.mCurrentSlicedBucket->begin()
->second.front()
@@ -611,7 +612,8 @@
allData.clear();
allData.push_back(event2);
- gaugeProducer.onDataPulled(allData, /** succeed */ true, bucketStartTimeNs + bucketSizeNs);
+ gaugeProducer.onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS,
+ bucketStartTimeNs + bucketSizeNs);
ASSERT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
EXPECT_EQ(15L, gaugeProducer.mCurrentSlicedBucket->begin()
->second.front()
@@ -623,7 +625,8 @@
allData.clear();
allData.push_back(
CreateRepeatedValueLogEvent(tagId, bucketStartTimeNs + 2 * bucketSizeNs + 10, 26));
- gaugeProducer.onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + 2 * bucketSizeNs);
+ gaugeProducer.onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS,
+ bucket2StartTimeNs + 2 * bucketSizeNs);
ASSERT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
EXPECT_EQ(26L, gaugeProducer.mCurrentSlicedBucket->begin()
->second.front()
@@ -635,7 +638,8 @@
// This event does not have the gauge field. Thus the current bucket value is 0.
allData.clear();
allData.push_back(CreateNoValuesLogEvent(tagId, bucketStartTimeNs + 3 * bucketSizeNs + 10));
- gaugeProducer.onDataPulled(allData, /** succeed */ true, bucketStartTimeNs + 3 * bucketSizeNs);
+ gaugeProducer.onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS,
+ bucketStartTimeNs + 3 * bucketSizeNs);
ASSERT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
EXPECT_TRUE(gaugeProducer.mCurrentSlicedBucket->begin()->second.front().mFields->empty());
}
@@ -703,6 +707,64 @@
EXPECT_THAT(atomValues, UnorderedElementsAre(4, 5));
}
+TEST(GaugeMetricProducerTest, TestPullNWithoutTrigger) {
+ GaugeMetric metric;
+ metric.set_id(metricId);
+ metric.set_bucket(ONE_MINUTE);
+ metric.set_sampling_type(GaugeMetric::FIRST_N_SAMPLES);
+ metric.set_max_pull_delay_sec(INT_MAX);
+ metric.set_max_num_gauge_atoms_per_bucket(3);
+ auto gaugeFieldMatcher = metric.mutable_gauge_fields_filter()->mutable_fields();
+ gaugeFieldMatcher->set_field(tagId);
+
+ sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+
+ sp<EventMatcherWizard> eventMatcherWizard =
+ createEventMatcherWizard(tagId, logEventMatcherIndex);
+
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
+ EXPECT_CALL(*pullerManager, RegisterReceiver(tagId, kConfigKey, _, _, _)).WillOnce(Return());
+ EXPECT_CALL(*pullerManager, UnRegisterReceiver(tagId, kConfigKey, _)).WillOnce(Return());
+ EXPECT_CALL(*pullerManager, Pull(tagId, kConfigKey, _, _))
+ .WillOnce(Invoke([](int tagId, const ConfigKey&, const int64_t eventTimeNs,
+ vector<std::shared_ptr<LogEvent>>* data) {
+ EXPECT_EQ(eventTimeNs, bucketStartTimeNs);
+ data->clear();
+ data->push_back(CreateRepeatedValueLogEvent(tagId, eventTimeNs, 4));
+ data->push_back(CreateRepeatedValueLogEvent(tagId, eventTimeNs, 5));
+ data->push_back(CreateRepeatedValueLogEvent(tagId, eventTimeNs, 6));
+ data->push_back(CreateRepeatedValueLogEvent(tagId, eventTimeNs, 7));
+ return true;
+ }));
+
+ GaugeMetricProducer gaugeProducer(kConfigKey, metric, -1 /*-1 meaning no condition*/, {},
+ wizard, protoHash, logEventMatcherIndex, eventMatcherWizard,
+ tagId, /*triggerId=*/-1, tagId, bucketStartTimeNs,
+ bucketStartTimeNs, pullerManager);
+
+ EXPECT_EQ(0UL, gaugeProducer.mCurrentSlicedBucket->size());
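+    // prepareFirstBucket performs the initial pull; since max_num_gauge_atoms_per_bucket is 3,
+    // only the first three of the four pulled atoms (values 4, 5, 6) are kept in the bucket.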
+ gaugeProducer.prepareFirstBucket();
+ ASSERT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
+ EXPECT_EQ(3UL, gaugeProducer.mCurrentSlicedBucket->begin()->second.size());
+
+ vector<std::shared_ptr<LogEvent>> allData;
+ allData.push_back(CreateNoValuesLogEvent(tagId, bucket2StartTimeNs + 10));
+ gaugeProducer.onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs + 30);
+ ASSERT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->size());
+ EXPECT_EQ(1UL, gaugeProducer.mCurrentSlicedBucket->begin()->second.size());
+
+ ASSERT_EQ(1UL, gaugeProducer.mPastBuckets.size());
+ ASSERT_EQ(3UL, gaugeProducer.mPastBuckets.begin()->second.back().mAggregatedAtoms.size());
+ auto it = gaugeProducer.mPastBuckets.begin()->second.back().mAggregatedAtoms.begin();
+ vector<int> atomValues;
+ atomValues.emplace_back(it->first.getAtomFieldValues().getValues().begin()->mValue.int_value);
+ it++;
+ atomValues.emplace_back(it->first.getAtomFieldValues().getValues().begin()->mValue.int_value);
+ it++;
+ atomValues.emplace_back(it->first.getAtomFieldValues().getValues().begin()->mValue.int_value);
+ EXPECT_THAT(atomValues, UnorderedElementsAre(4, 5, 6));
+}
+
TEST(GaugeMetricProducerTest, TestRemoveDimensionInOutput) {
GaugeMetric metric;
metric.set_id(metricId);
@@ -848,6 +910,96 @@
EXPECT_EQ(NanoToMillis(bucketStartTimeNs + 9000000), dropEvent.drop_time_millis());
}
+TEST(GaugeMetricProducerTest, TestPullDimensionalSampling) {
+ ShardOffsetProvider::getInstance().setShardOffset(5);
+
+ StatsdConfig config;
+ config.add_allowed_log_source("AID_ROOT"); // LogEvent defaults to UID of root.
+
+ int triggerId = 5;
+ int shardCount = 2;
+ GaugeMetric sampledGaugeMetric = createGaugeMetric(
+ "GaugePullSampled", metricId, GaugeMetric::FIRST_N_SAMPLES, nullopt, triggerId);
+ sampledGaugeMetric.set_max_pull_delay_sec(INT_MAX);
+ *sampledGaugeMetric.mutable_dimensions_in_what() = CreateDimensions(tagId, {1});
+ *sampledGaugeMetric.mutable_dimensional_sampling_info()->mutable_sampled_what_field() =
+ CreateDimensions(tagId, {1});
+ sampledGaugeMetric.mutable_dimensional_sampling_info()->set_shard_count(shardCount);
+ *config.add_gauge_metric() = sampledGaugeMetric;
+
+ sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+
+ sp<EventMatcherWizard> eventMatcherWizard =
+ createEventMatcherWizard(tagId, logEventMatcherIndex);
+
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
+
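+    // Two trigger events arrive within the first bucket, so the puller is expected to be
+    // invoked twice, once per trigger.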
+ EXPECT_CALL(*pullerManager, Pull(tagId, kConfigKey, _, _))
+ .WillOnce(Invoke([](int tagId, const ConfigKey&, const int64_t eventTimeNs,
+ vector<std::shared_ptr<LogEvent>>* data) {
+ data->clear();
+ data->push_back(makeUidLogEvent(tagId, bucketStartTimeNs + 10, 1001, 5, 10));
+ data->push_back(makeUidLogEvent(tagId, bucketStartTimeNs + 10, 1002, 10, 10));
+ data->push_back(makeUidLogEvent(tagId, bucketStartTimeNs + 10, 1003, 15, 10));
+ return true;
+ }))
+ .WillOnce(Invoke([](int tagId, const ConfigKey&, const int64_t eventTimeNs,
+ vector<std::shared_ptr<LogEvent>>* data) {
+ data->clear();
+ data->push_back(makeUidLogEvent(tagId, bucketStartTimeNs + 20, 1001, 6, 10));
+ data->push_back(makeUidLogEvent(tagId, bucketStartTimeNs + 20, 1002, 12, 10));
+ data->push_back(makeUidLogEvent(tagId, bucketStartTimeNs + 20, 1003, 18, 10));
+ return true;
+ }));
+
+ GaugeMetricProducer gaugeProducer(kConfigKey, sampledGaugeMetric,
+ -1 /*-1 meaning no condition*/, {}, wizard, protoHash,
+ logEventMatcherIndex, eventMatcherWizard, tagId, triggerId,
+ tagId, bucketStartTimeNs, bucketStartTimeNs, pullerManager);
+ SamplingInfo samplingInfo;
+ samplingInfo.shardCount = shardCount;
+ translateFieldMatcher(sampledGaugeMetric.dimensional_sampling_info().sampled_what_field(),
+ &samplingInfo.sampledWhatFields);
+ gaugeProducer.setSamplingInfo(samplingInfo);
+ gaugeProducer.prepareFirstBucket();
+
+ LogEvent triggerEvent(/*uid=*/0, /*pid=*/0);
+ CreateRepeatedValueLogEvent(&triggerEvent, triggerId, bucketStartTimeNs + 10, 5);
+ gaugeProducer.onMatchedLogEvent(1 /*log matcher index*/, triggerEvent);
+
+ triggerEvent.setElapsedTimestampNs(bucketStartTimeNs + 20);
+ gaugeProducer.onMatchedLogEvent(1 /*log matcher index*/, triggerEvent);
+
+ // Check dump report.
+ ProtoOutputStream output;
+ std::set<string> strSet;
+ int64_t dumpReportTimeNs = bucketStartTimeNs + 10000000000;
+ gaugeProducer.onDumpReport(dumpReportTimeNs, true /* include current buckets */, true,
+ NO_TIME_CONSTRAINTS /* dumpLatency */, &strSet, &output);
+
+ StatsLogReport report = outputStreamToProto(&output);
+ backfillDimensionPath(&report);
+ backfillStartEndTimestamp(&report);
+ backfillAggregatedAtoms(&report);
+
+ EXPECT_TRUE(report.has_gauge_metrics());
+ StatsLogReport::GaugeMetricDataWrapper gaugeMetrics;
+ sortMetricDataByDimensionsValue(report.gauge_metrics(), &gaugeMetrics);
+ ASSERT_EQ(2, gaugeMetrics.data_size());
+ EXPECT_EQ(0, report.gauge_metrics().skipped_size());
+
+    // Only uids 1001 and 1003 are logged: ((odd hash value) + (offset of 5)) % (shard count of 2) == 0
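+    // By that math, uids 1001 and 1003 land in the reported shard, while uid 1002 falls in the
+    // other shard and is dropped by the dimensional sampling.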
+ GaugeMetricData data = gaugeMetrics.data(0);
+ ValidateUidDimension(data.dimensions_in_what(), tagId, 1001);
+ ValidateGaugeBucketTimes(data.bucket_info(0), bucketStartTimeNs, dumpReportTimeNs,
+ {bucketStartTimeNs + 10, bucketStartTimeNs + 20});
+
+ data = gaugeMetrics.data(1);
+ ValidateUidDimension(data.dimensions_in_what(), tagId, 1003);
+ ValidateGaugeBucketTimes(data.bucket_info(0), bucketStartTimeNs, dumpReportTimeNs,
+ {bucketStartTimeNs + 10, bucketStartTimeNs + 20});
+}
+
} // namespace statsd
} // namespace os
} // namespace android
diff --git a/statsd/tests/metrics/NumericValueMetricProducer_test.cpp b/statsd/tests/metrics/NumericValueMetricProducer_test.cpp
index 1b5abaf..cfab4c9 100644
--- a/statsd/tests/metrics/NumericValueMetricProducer_test.cpp
+++ b/statsd/tests/metrics/NumericValueMetricProducer_test.cpp
@@ -101,15 +101,6 @@
}
}
-static void assertConditionTimer(const ConditionTimer& conditionTimer, bool condition,
- int64_t timerNs, int64_t lastConditionTrueTimestampNs,
- int64_t currentBucketStartDelayNs = 0) {
- EXPECT_EQ(condition, conditionTimer.mCondition);
- EXPECT_EQ(timerNs, conditionTimer.mTimerNs);
- EXPECT_EQ(lastConditionTrueTimestampNs, conditionTimer.mLastConditionChangeTimestampNs);
- EXPECT_EQ(currentBucketStartDelayNs, conditionTimer.mCurrentBucketStartDelayNs);
-}
-
} // anonymous namespace
class NumericValueMetricProducerTestHelper {
@@ -147,6 +138,22 @@
stateGroupMap);
}
+ static sp<NumericValueMetricProducer> createValueProducerWithSampling(
+ sp<MockStatsPullerManager>& pullerManager, ValueMetric& metric,
+ const int pullAtomId = tagId) {
+ sp<NumericValueMetricProducer> valueProducer = createValueProducer(
+ pullerManager, metric, pullAtomId, /*conditionAfterFirstBucketPrepared=*/nullopt,
+ /*slicedStateAtoms=*/{}, /*stateGroupMap=*/{}, bucketStartTimeNs, bucketStartTimeNs,
+ /*eventMatcherWizard=*/nullptr);
+
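+        // Build SamplingInfo from the metric's dimensional_sampling_info and attach it to the
+        // producer so the test exercises dimensional sampling.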
+ SamplingInfo samplingInfo;
+ samplingInfo.shardCount = metric.dimensional_sampling_info().shard_count();
+ translateFieldMatcher(metric.dimensional_sampling_info().sampled_what_field(),
+ &samplingInfo.sampledWhatFields);
+ valueProducer->setSamplingInfo(samplingInfo);
+ return valueProducer;
+ }
+
static sp<NumericValueMetricProducer> createValueProducerWithBucketParams(
sp<MockStatsPullerManager>& pullerManager, ValueMetric& metric,
const int64_t timeBaseNs, const int64_t startTimeNs, const int pullAtomId = tagId) {
@@ -349,7 +356,7 @@
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 11));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
// empty since bucket is flushed
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
// dimInfos holds the base
@@ -363,7 +370,7 @@
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs + 1, 23));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
// empty since bucket is cleared
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
// dimInfos holds the base
@@ -378,7 +385,7 @@
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket4StartTimeNs + 1, 36));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket4StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket4StartTimeNs);
// empty since bucket is cleared
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
// dimInfos holds the base
@@ -425,7 +432,7 @@
vector<shared_ptr<LogEvent>> allData;
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 10, 2));
- valueProducer->onDataPulled(allData, /** success */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
// Partial buckets created in 2nd bucket.
switch (GetParam()) {
@@ -474,7 +481,7 @@
allData.clear();
allData.push_back(CreateTwoValueLogEvent(tagId, bucket2StartTimeNs + 1, 3, 11));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
// empty since bucket is cleared
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
// dimInfos holds the base
@@ -488,14 +495,14 @@
allData.clear();
allData.push_back(CreateTwoValueLogEvent(tagId, bucket3StartTimeNs + 1, 4, 23));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
// No new data seen, so data has been cleared.
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(0UL, valueProducer->mDimInfos.size());
allData.clear();
allData.push_back(CreateTwoValueLogEvent(tagId, bucket4StartTimeNs + 1, 3, 36));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket4StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket4StartTimeNs);
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
curBase = valueProducer->mDimInfos.begin()->second.dimExtras[0];
@@ -525,7 +532,7 @@
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 11));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
// empty since bucket is cleared
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
// dimInfos holds the base
@@ -539,7 +546,7 @@
allData.clear();
// 10 is less than 11, so we reset and keep 10 as the value.
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs + 1, 10));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
// empty since the bucket is flushed.
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
@@ -551,7 +558,7 @@
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket4StartTimeNs + 1, 36));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket4StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket4StartTimeNs);
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
curBase = valueProducer->mDimInfos.begin()->second.dimExtras[0];
@@ -578,7 +585,7 @@
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 11));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
// empty since bucket is cleared
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
// mDimInfos holds the base
@@ -592,7 +599,7 @@
allData.clear();
// 10 is less than 11, so we reset. 10 only updates the base.
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs + 1, 10));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
curBase = valueProducer->mDimInfos.begin()->second.dimExtras[0];
@@ -602,7 +609,7 @@
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket4StartTimeNs + 1, 36));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket4StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket4StartTimeNs);
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
curBase = valueProducer->mDimInfos.begin()->second.dimExtras[0];
@@ -664,7 +671,7 @@
vector<shared_ptr<LogEvent>> allData;
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 110));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {10}, {bucketSizeNs - 8}, {0},
{bucketStartTimeNs}, {bucket2StartTimeNs});
@@ -777,7 +784,7 @@
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 100));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
switch (GetParam()) {
@@ -795,7 +802,7 @@
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs + 1, 150));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
EXPECT_EQ(bucket3StartTimeNs, valueProducer->mCurrentBucketStartTimeNs);
EXPECT_EQ(2, valueProducer->getCurrentBucketNum());
assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {20, 30},
@@ -823,7 +830,7 @@
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 100));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
@@ -913,9 +920,25 @@
curInterval = valueProducer->mCurrentSlicedBucket.begin()->second.intervals[0];
EXPECT_EQ(30, curInterval.aggregate.long_value);
- valueProducer->flushIfNeededLocked(bucket2StartTimeNs);
- assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {30}, {bucketSizeNs}, {0},
- {bucketStartTimeNs}, {bucket2StartTimeNs});
+ // Check dump report.
+ ProtoOutputStream output;
+ valueProducer->onDumpReport(bucket2StartTimeNs + 10000, false /* include recent buckets */,
+ true, FAST /* dumpLatency */, nullptr, &output);
+
+ StatsLogReport report = outputStreamToProto(&output);
+ backfillDimensionPath(&report);
+ backfillStartEndTimestamp(&report);
+ EXPECT_TRUE(report.has_value_metrics());
+ ASSERT_EQ(1, report.value_metrics().data_size());
+ ASSERT_EQ(0, report.value_metrics().skipped_size());
+
+ auto data = report.value_metrics().data(0);
+ ASSERT_EQ(1, data.bucket_info_size());
+ EXPECT_EQ(30, data.bucket_info(0).values(0).value_long());
+ EXPECT_EQ(bucketStartTimeNs, data.bucket_info(0).start_bucket_elapsed_nanos());
+ EXPECT_EQ(bucket2StartTimeNs, data.bucket_info(0).end_bucket_elapsed_nanos());
+ EXPECT_FALSE(data.has_dimensions_in_what());
+
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
}
@@ -970,9 +993,26 @@
curInterval = valueProducer->mCurrentSlicedBucket.begin()->second.intervals[0];
EXPECT_EQ(50, curInterval.aggregate.long_value);
- valueProducer->flushIfNeededLocked(bucket2StartTimeNs);
- assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {50}, {20}, {0},
- {bucketStartTimeNs}, {bucket2StartTimeNs});
+ // Check dump report.
+ ProtoOutputStream output;
+ valueProducer->onDumpReport(bucket2StartTimeNs + 10000, false /* include recent buckets */,
+ true, FAST /* dumpLatency */, nullptr, &output);
+
+ StatsLogReport report = outputStreamToProto(&output);
+ backfillDimensionPath(&report);
+ backfillStartEndTimestamp(&report);
+ EXPECT_TRUE(report.has_value_metrics());
+ ASSERT_EQ(1, report.value_metrics().data_size());
+ ASSERT_EQ(0, report.value_metrics().skipped_size());
+
+ auto data = report.value_metrics().data(0);
+ ASSERT_EQ(1, data.bucket_info_size());
+ EXPECT_EQ(50, data.bucket_info(0).values(0).value_long());
+ EXPECT_EQ(20, data.bucket_info(0).condition_true_nanos());
+ EXPECT_EQ(bucketStartTimeNs, data.bucket_info(0).start_bucket_elapsed_nanos());
+ EXPECT_EQ(bucket2StartTimeNs, data.bucket_info(0).end_bucket_elapsed_nanos());
+ EXPECT_FALSE(data.has_dimensions_in_what());
+
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
}
@@ -1108,7 +1148,7 @@
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 11));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
// empty since bucket is finished
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
@@ -1122,7 +1162,7 @@
// pull 2 at correct time
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs + 1, 23));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
// empty since bucket is finished
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
@@ -1139,7 +1179,7 @@
// The new bucket is back to normal.
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket6StartTimeNs + 1, 36));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket6StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket6StartTimeNs);
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
curBase = valueProducer->mDimInfos.begin()->second.dimExtras[0];
@@ -1220,7 +1260,7 @@
    // since the condition turned off before this pull finished, it has no effect
vector<shared_ptr<LogEvent>> allData;
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 30, 110));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {20}, {bucketSizeNs - 8}, {1},
{bucketStartTimeNs}, {bucket2StartTimeNs});
@@ -1306,7 +1346,7 @@
// for the new bucket since it was just pulled.
vector<shared_ptr<LogEvent>> allData;
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 50, 140));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + 50);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs + 50);
ASSERT_EQ(1UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
@@ -1321,7 +1361,7 @@
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs, 160));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
assertPastBucketValuesSingleKey(
valueProducer->mPastBuckets, {20, 30}, {bucketSizeNs - 8, bucketSizeNs - 24}, {1, -1},
@@ -1447,6 +1487,7 @@
EXPECT_TRUE(
std::abs(valueProducer->mPastBuckets.begin()->second.back().aggregates[0].double_value -
12.5) < epsilon);
+ EXPECT_EQ(2, valueProducer->mPastBuckets.begin()->second.back().sampleSizes[0]);
}
TEST(NumericValueMetricProducerTest, TestPushedAggregateSum) {
@@ -1709,7 +1750,7 @@
allData.push_back(CreateTwoValueLogEvent(tagId, bucket2StartTimeNs + 1, 2, 4));
allData.push_back(CreateTwoValueLogEvent(tagId, bucket2StartTimeNs + 1, 1, 11));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(2UL, valueProducer->mDimInfos.size());
EXPECT_EQ(true, base1.has_value());
@@ -1775,7 +1816,7 @@
allData.push_back(CreateTwoValueLogEvent(tagId, bucket2StartTimeNs + 1, 2, 4));
allData.push_back(CreateTwoValueLogEvent(tagId, bucket2StartTimeNs + 1, 1, 11));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(2UL, valueProducer->mDimInfos.size());
EXPECT_EQ(true, base1.has_value());
@@ -1798,7 +1839,7 @@
    // This pull is incomplete since it's missing dimension 1. It will cause mDimInfos to be trimmed
allData.clear();
allData.push_back(CreateTwoValueLogEvent(tagId, bucket4StartTimeNs + 1, 2, 5));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket4StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket4StartTimeNs);
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
@@ -1812,7 +1853,7 @@
allData.clear();
allData.push_back(CreateTwoValueLogEvent(tagId, bucket5StartTimeNs + 1, 2, 13));
allData.push_back(CreateTwoValueLogEvent(tagId, bucket5StartTimeNs + 1, 1, 5));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket5StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket5StartTimeNs);
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(2UL, valueProducer->mDimInfos.size());
@@ -1865,7 +1906,7 @@
allData.clear();
allData.push_back(CreateTwoValueLogEvent(tagId, bucket2StartTimeNs + 1, 2, 4));
allData.push_back(CreateTwoValueLogEvent(tagId, bucket2StartTimeNs + 1, 1, 11));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(2UL, valueProducer->mDimInfos.size());
@@ -1891,7 +1932,7 @@
// next pull somehow did not happen, skip to end of bucket 3
allData.clear();
allData.push_back(CreateTwoValueLogEvent(tagId, bucket4StartTimeNs + 1, 2, 5));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket4StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket4StartTimeNs);
// Only one dimension left. One was trimmed.
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
@@ -1906,7 +1947,7 @@
allData.clear();
allData.push_back(CreateTwoValueLogEvent(tagId, bucket5StartTimeNs + 1, 2, 14));
allData.push_back(CreateTwoValueLogEvent(tagId, bucket5StartTimeNs + 1, 1, 14));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket5StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket5StartTimeNs);
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(2UL, valueProducer->mDimInfos.size());
@@ -1914,7 +1955,7 @@
allData.clear();
allData.push_back(CreateTwoValueLogEvent(tagId, bucket6StartTimeNs + 1, 1, 19));
allData.push_back(CreateTwoValueLogEvent(tagId, bucket6StartTimeNs + 1, 2, 20));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket6StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket6StartTimeNs);
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(2UL, valueProducer->mDimInfos.size());
@@ -1976,7 +2017,7 @@
EXPECT_EQ(0, curInterval.sampleSize);
vector<shared_ptr<LogEvent>> allData;
- valueProducer->onDataPulled(allData, /** succeed */ false, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_FAIL, bucket2StartTimeNs);
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
EXPECT_EQ(false, curBase.has_value());
@@ -2164,7 +2205,7 @@
vector<shared_ptr<LogEvent>> allData;
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucketStartTimeNs + 1, 110));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucketStartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucketStartTimeNs);
// This will fail and should invalidate the whole bucket since we do not have all the data
// needed to compute the metric value when the screen was on.
@@ -2174,7 +2215,7 @@
// Bucket end.
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 140));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
valueProducer->flushIfNeededLocked(bucket2StartTimeNs + 1);
@@ -2242,7 +2283,7 @@
vector<shared_ptr<LogEvent>> allData;
allData.clear();
allData.push_back(CreateTwoValueLogEvent(tagId, bucket2StartTimeNs + 1, 1, 10));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
// First bucket added to mSkippedBuckets after flush.
ASSERT_EQ(1UL, valueProducer->mSkippedBuckets.size());
@@ -2302,7 +2343,7 @@
vector<shared_ptr<LogEvent>> allData;
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucketStartTimeNs + 1, 110));
- valueProducer->onDataPulled(allData, /** succeed */ false, bucketStartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_FAIL, bucketStartTimeNs);
valueProducer->onConditionChanged(false, bucketStartTimeNs + 2);
valueProducer->onConditionChanged(true, bucketStartTimeNs + 3);
@@ -2310,7 +2351,7 @@
// Bucket end.
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 140));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
valueProducer->flushIfNeededLocked(bucket2StartTimeNs + 1);
@@ -2379,7 +2420,7 @@
vector<shared_ptr<LogEvent>> allData;
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucketStartTimeNs + 1, 110));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucketStartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucketStartTimeNs);
valueProducer->onConditionChanged(false, bucketStartTimeNs + 2);
valueProducer->onConditionChanged(true, bucketStartTimeNs + 3);
@@ -2387,7 +2428,7 @@
// Bucket end.
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 140));
- valueProducer->onDataPulled(allData, /** succeed */ false, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_FAIL, bucket2StartTimeNs);
valueProducer->flushIfNeededLocked(bucket2StartTimeNs + 1);
@@ -2441,7 +2482,7 @@
vector<shared_ptr<LogEvent>> allData;
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 110));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
EXPECT_EQ(valueProducer->mDimInfos.begin()->second.seenNewData, false);
@@ -2451,7 +2492,7 @@
// Bucket 3 empty.
allData.clear();
allData.push_back(CreateNoValuesLogEvent(tagId, bucket3StartTimeNs + 1));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
// Data has been trimmed.
ASSERT_EQ(1UL, valueProducer->mPastBuckets.size());
ASSERT_EQ(1UL, valueProducer->mSkippedBuckets.size());
@@ -2461,7 +2502,7 @@
// Bucket 4 start.
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket4StartTimeNs + 1, 150));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket4StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket4StartTimeNs);
ASSERT_EQ(1UL, valueProducer->mPastBuckets.size());
ASSERT_EQ(2UL, valueProducer->mSkippedBuckets.size());
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
@@ -2470,7 +2511,7 @@
// Bucket 5 start.
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket5StartTimeNs + 1, 170));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket5StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket5StartTimeNs);
assertPastBucketValuesSingleKey(
valueProducer->mPastBuckets, {107, 20}, {bucketSizeNs, bucketSizeNs}, {0, 0},
{bucketStartTimeNs, bucket4StartTimeNs}, {bucket2StartTimeNs, bucket5StartTimeNs});
@@ -2541,7 +2582,7 @@
vector<shared_ptr<LogEvent>> allData;
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 120));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
curBase = valueProducer->mDimInfos.begin()->second.dimExtras[0];
@@ -2599,7 +2640,7 @@
// End of bucket
vector<shared_ptr<LogEvent>> allData;
allData.clear();
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(0UL, valueProducer->mDimInfos.size());
@@ -2636,7 +2677,7 @@
vector<shared_ptr<LogEvent>> allData;
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 2));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
    // Key 1 should be removed from mDimInfos since it is not present in the most recent pull.
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
@@ -2704,7 +2745,7 @@
vector<shared_ptr<LogEvent>> allData;
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs + 1, 4));
// Pull fails and arrives late.
- valueProducer->onDataPulled(allData, /** fails */ false, bucket3StartTimeNs + 1);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_FAIL, bucket3StartTimeNs + 1);
assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {9},
{partialBucketSplitTimeNs - bucketStartTimeNs}, {0},
{bucketStartTimeNs}, {partialBucketSplitTimeNs});
@@ -2748,7 +2789,7 @@
// End of first bucket
vector<shared_ptr<LogEvent>> allData;
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 4));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + 1);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs + 1);
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
valueProducer->onConditionChanged(true, bucket2StartTimeNs + 10);
@@ -2780,11 +2821,11 @@
vector<shared_ptr<LogEvent>> allData;
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucketStartTimeNs + 30, 10));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucketStartTimeNs + 30);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucketStartTimeNs + 30);
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs, 20));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
// Bucket should have been completed.
assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {30}, {bucketSizeNs}, {0},
@@ -2810,11 +2851,11 @@
vector<shared_ptr<LogEvent>> allData;
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucketStartTimeNs + 30, 10));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucketStartTimeNs + 30);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucketStartTimeNs + 30);
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs, 20));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
// Bucket should have been completed.
assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {19}, {bucketSizeNs}, {0},
@@ -2901,7 +2942,7 @@
vector<shared_ptr<LogEvent>> allData;
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 10));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + 1);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs + 1);
assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {2}, {2}, {0}, {bucketStartTimeNs},
{bucket2StartTimeNs});
@@ -2945,18 +2986,18 @@
vector<shared_ptr<LogEvent>> allData;
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucketStartTimeNs + 3, 10));
- valueProducer->onDataPulled(allData, /** succeed */ false, bucketStartTimeNs + 3);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_FAIL, bucketStartTimeNs + 3);
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs, 20));
- valueProducer->onDataPulled(allData, /** succeed */ false, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_FAIL, bucket2StartTimeNs);
valueProducer->onConditionChanged(false, bucket2StartTimeNs + 8);
valueProducer->onConditionChanged(true, bucket2StartTimeNs + 10);
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs, 30));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
    // There was no global base available, so all buckets are invalid.
assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {}, {}, {}, {}, {});
@@ -2985,7 +3026,7 @@
vector<shared_ptr<LogEvent>> allData;
allData.clear();
allData.push_back(CreateThreeValueLogEvent(tagId, bucket2StartTimeNs + 1, tagId, 2, 2));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
ProtoOutputStream output;
std::set<string> strSet;
@@ -3050,7 +3091,7 @@
vector<shared_ptr<LogEvent>> allData;
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 30, 10));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + 30);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs + 30);
// Bucket should have been completed.
assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {10}, {bucketSizeNs}, {30},
@@ -3101,7 +3142,7 @@
// Now the alarm is delivered. Condition is off though.
vector<shared_ptr<LogEvent>> allData;
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 30, 110));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {20}, {50 - 8}, {0},
{bucketStartTimeNs}, {bucket2StartTimeNs});
@@ -3133,7 +3174,7 @@
// Now the alarm is delivered. Condition is on.
vector<shared_ptr<LogEvent>> allData;
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 30, 30));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {30}, {bucketSizeNs - 8}, {0},
{bucketStartTimeNs}, {bucket2StartTimeNs});
@@ -3155,7 +3196,7 @@
// Now the alarm is delivered. Condition is off though.
vector<shared_ptr<LogEvent>> allData;
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 30, 30));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
// Condition was always false.
assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {}, {}, {}, {}, {});
@@ -3189,7 +3230,7 @@
// Now the alarm is delivered. Condition is off though.
vector<shared_ptr<LogEvent>> allData;
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 30, 30));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(0UL, valueProducer->mDimInfos.size());
@@ -3278,7 +3319,7 @@
// Bucket boundary pull.
vector<shared_ptr<LogEvent>> allData;
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs, 15));
- valueProducer->onDataPulled(allData, /** succeeds */ true, bucket2StartTimeNs + 1);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs + 1);
// Late condition change event.
valueProducer->onConditionChanged(false, bucket2StartTimeNs - 100);
@@ -3345,7 +3386,7 @@
// Bucket boundary pull.
vector<shared_ptr<LogEvent>> allData;
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs, 15));
- valueProducer->onDataPulled(allData, /** succeeds */ true, bucket2StartTimeNs + 1);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs + 1);
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs - 100, 20));
@@ -3689,7 +3730,7 @@
vector<shared_ptr<LogEvent>> allData;
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + 1, 3));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
// This bucket is also dropped due to condition unknown.
int64_t conditionChangeTimeNs = bucket2StartTimeNs + 10 * NS_PER_SEC;
@@ -4687,7 +4728,7 @@
vector<shared_ptr<LogEvent>> allData;
allData.push_back(CreateTwoValueLogEvent(tagId, bucket2StartTimeNs, 1 /*uid*/, 10));
allData.push_back(CreateTwoValueLogEvent(tagId, bucket2StartTimeNs, 2 /*uid*/, 15));
- valueProducer->onDataPulled(allData, /** succeeds */ true, bucket2StartTimeNs + 1);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs + 1);
// Ensure the MetricDimensionKeys for the current state are kept.
ASSERT_EQ(2UL, valueProducer->mCurrentSlicedBucket.size());
@@ -5723,7 +5764,7 @@
CreateThreeValueLogEvent(tagId, bucket2StartTimeNs, 1 /*uid*/, 13, 16 /* tag */));
allData.push_back(
CreateThreeValueLogEvent(tagId, bucket2StartTimeNs, 2 /*uid*/, 13, 8 /*tag*/));
- valueProducer->onDataPulled(allData, /** succeeds */ true, bucket2StartTimeNs + 1);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs + 1);
// Buckets flushed. MetricDimensionKeys not corresponding to the current state are removed.
ASSERT_EQ(3UL, valueProducer->mCurrentSlicedBucket.size());
@@ -5933,7 +5974,7 @@
vector<shared_ptr<LogEvent>> allData;
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs, 11));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
ASSERT_EQ(2UL, valueProducer->mPastBuckets.size());
ASSERT_EQ(1UL, valueProducer->mCurrentSlicedBucket.size());
@@ -6097,7 +6138,7 @@
// Pull at end of first bucket.
vector<shared_ptr<LogEvent>> allData;
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs, 11));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
ASSERT_EQ(2UL, valueProducer->mPastBuckets.size());
ASSERT_EQ(1UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(1UL, valueProducer->mDimInfos.size());
@@ -6110,7 +6151,7 @@
// Pull at end of second bucket. Since no new data is seen, mDimInfos will be cleared.
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs, 15));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
ASSERT_EQ(2UL, valueProducer->mPastBuckets.size());
ASSERT_EQ(1UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(0UL, valueProducer->mDimInfos.size());
@@ -6306,7 +6347,7 @@
CreateThreeValueLogEvent(tagId, bucket2StartTimeNs, 1 /*uid*/, 13, 14 /* tag */));
allData.push_back(
CreateThreeValueLogEvent(tagId, bucket2StartTimeNs, 1 /*uid*/, 13, 16 /* tag */));
- valueProducer->onDataPulled(allData, /** succeeds */ true, bucket2StartTimeNs + 1);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs + 1);
// Buckets flushed. MetricDimensionKeys not corresponding to the current state are removed.
ASSERT_EQ(2UL, valueProducer->mCurrentSlicedBucket.size());
@@ -6494,7 +6535,7 @@
allData.clear();
allData.push_back(CreateThreeValueLogEvent(tagId, bucket2StartTimeNs + 1, 1 /*uid*/, 21, 21));
allData.push_back(CreateThreeValueLogEvent(tagId, bucket2StartTimeNs + 1, 2 /*uid*/, 20, 5));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
// Check dump report.
ProtoOutputStream output;
@@ -6563,7 +6604,8 @@
// first delayed pull on the bucket #1 edge
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + pullDelayNs, 10));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + pullDelayNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS,
+ bucket2StartTimeNs + pullDelayNs);
// the delayed pull did close the first bucket with condition duration == bucketSizeNs
assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {5}, {bucketSizeNs}, {pullDelayNs},
@@ -6572,7 +6614,7 @@
// second pull on the bucket #2 boundary on time
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs, 15));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
// the second pull did close the second bucket with condition duration == bucketSizeNs
assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {5, 5},
@@ -6613,7 +6655,8 @@
vector<shared_ptr<LogEvent>> allData;
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + delayNs, 10));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + delayNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS,
+ bucket2StartTimeNs + delayNs);
// first delayed pull on the bucket #1 edge
// the delayed pull did close the first bucket with condition duration == conditionDurationNs
@@ -6627,7 +6670,7 @@
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs, 10));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
// second pull on the bucket #2 edge is on time
assertPastBucketValuesSingleKey(
@@ -6683,7 +6726,7 @@
vector<shared_ptr<LogEvent>> allData;
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs, 30));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
// second pull on the bucket #2 edge is on time
// the pull did close the second bucket with condition where
@@ -6786,7 +6829,7 @@
vector<shared_ptr<LogEvent>> allData;
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket1LatePullNs, 10));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket1LatePullNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket1LatePullNs);
// first delayed pull on the bucket #1 edge
// the delayed pull did close the first bucket with condition duration == bucketSizeNs
@@ -6800,7 +6843,7 @@
// will force delayed pull & bucket #2 close
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2LatePullNs, 25));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2LatePullNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2LatePullNs);
// second delayed pull on the bucket #2 edge
// the pull did close the second bucket with condition true
@@ -6817,7 +6860,7 @@
// will force pull on time & bucket #3 close
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket4StartTimeNs, 40));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket4StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket4StartTimeNs);
// the pull did close the third bucket with condition true
assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {5, 10, 15},
@@ -6854,7 +6897,8 @@
// first delayed pull on the bucket #1 edge
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + pullDelayNs, 10));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + pullDelayNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS,
+ bucket2StartTimeNs + pullDelayNs);
// the delayed pull did close the first bucket with condition duration == bucketSizeNs
assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {5}, {bucketSizeNs}, {pullDelayNs},
@@ -6863,7 +6907,7 @@
// second pull on the bucket #2 boundary on time
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs, 15));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
// the second pull did close the second bucket with condition duration == bucketSizeNs
assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {5, 5},
@@ -6874,7 +6918,7 @@
// third pull on the bucket #3 boundary on time
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket4StartTimeNs, 20));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket4StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket4StartTimeNs);
// the third pull did close the third bucket with condition duration == bucketSizeNs
assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {5, 5, 5},
@@ -6906,7 +6950,8 @@
// first delayed pull on the bucket #1 edge with delay
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + pullDelayNs, 10));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + pullDelayNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS,
+ bucket2StartTimeNs + pullDelayNs);
    // the delayed pull did close the first bucket, which is skipped
    // because the bucket does not contain any value
@@ -6916,7 +6961,7 @@
// second pull on the bucket #2 boundary on time
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs, 15));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
// the second pull did close the second bucket with condition duration == bucketSizeNs
assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {5}, {bucketSizeNs},
@@ -6925,7 +6970,7 @@
// third pull on the bucket #3 boundary on time
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket4StartTimeNs, 20));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket4StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket4StartTimeNs);
// the third pull did close the third bucket with condition duration == bucketSizeNs
assertPastBucketValuesSingleKey(
@@ -6966,7 +7011,8 @@
// first delayed pull on the bucket #1 edge
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + pullDelayNs, 10));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + pullDelayNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS,
+ bucket2StartTimeNs + pullDelayNs);
// the delayed pull did close the first bucket with condition duration == bucketSizeNs
// and the condition correction == pull delay
@@ -7022,7 +7068,8 @@
// first delayed pull on the bucket #1 edge
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + pullDelayNs, 10));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + pullDelayNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS,
+ bucket2StartTimeNs + pullDelayNs);
// the delayed pull did close the first bucket with condition duration == bucketSizeNs
assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {5}, {bucketSizeNs}, {pullDelayNs},
@@ -7079,7 +7126,8 @@
// first delayed pull on the bucket #1 edge
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + pullDelayNs, 10));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + pullDelayNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS,
+ bucket2StartTimeNs + pullDelayNs);
// the delayed pull did close the first bucket with condition duration == bucketSizeNs
assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {5}, {bucketSizeNs}, {pullDelayNs},
@@ -7088,7 +7136,7 @@
// second pull on the bucket #2 boundary on time
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket3StartTimeNs, 15));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket3StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket3StartTimeNs);
// the second pull did close the second bucket with condition duration == bucketSizeNs
assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {5, 5},
@@ -7149,7 +7197,8 @@
// first delayed pull on the bucket #1 edge
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + pullDelayNs, 10));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + pullDelayNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS,
+ bucket2StartTimeNs + pullDelayNs);
// the delayed pull did close the first bucket with condition duration == bucketSizeNs
assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {5}, {bucketSizeNs}, {pullDelayNs},
@@ -7206,7 +7255,8 @@
// first delayed pull on the bucket #1 edge
allData.clear();
allData.push_back(CreateRepeatedValueLogEvent(tagId, bucket2StartTimeNs + pullDelayNs, 10));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs + pullDelayNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS,
+ bucket2StartTimeNs + pullDelayNs);
// the delayed pull did close the first bucket with condition duration == bucketSizeNs
assertPastBucketValuesSingleKey(valueProducer->mPastBuckets, {5}, {bucketSizeNs}, {pullDelayNs},
@@ -7389,7 +7439,7 @@
allData.push_back(CreateThreeValueLogEvent(tagId, bucket2StartTimeNs + 1, 1 /*uid*/, 11, 7));
allData.push_back(CreateThreeValueLogEvent(tagId, bucket2StartTimeNs + 1, 2 /*uid*/, 8, 5));
allData.push_back(CreateThreeValueLogEvent(tagId, bucket2StartTimeNs + 1, 2 /*uid*/, 9, 7));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
ASSERT_EQ(0UL, valueProducer->mCurrentSlicedBucket.size());
ASSERT_EQ(2UL, valueProducer->mDimInfos.size());
@@ -7468,7 +7518,7 @@
allData.clear();
allData.push_back(makeRepeatedUidLogEvent(tagId, bucket2StartTimeNs + 1, {1, 10}, 5, {5, 7}));
allData.push_back(makeRepeatedUidLogEvent(tagId, bucket2StartTimeNs + 1, {2, 10}, 5, {7, 5}));
- valueProducer->onDataPulled(allData, /** succeed */ true, bucket2StartTimeNs);
+ valueProducer->onDataPulled(allData, PullResult::PULL_RESULT_SUCCESS, bucket2StartTimeNs);
// Check dump report.
ProtoOutputStream output;
@@ -7505,6 +7555,184 @@
0); // Summed diffs of 7, 14
}
+TEST(NumericValueMetricProducerTest, TestSampleSize) {
+ sp<EventMatcherWizard> eventMatcherWizard =
+ createEventMatcherWizard(tagId, logEventMatcherIndex);
+ sp<MockConditionWizard> wizard = new NaggyMock<MockConditionWizard>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
+
+ ValueMetric metric = NumericValueMetricProducerTestHelper::createMetric();
+
+ // Sample size is added automatically with ValueMetric::AVG.
+ metric.set_aggregation_type(ValueMetric::AVG);
+ sp<NumericValueMetricProducer> valueProducerAvg =
+ NumericValueMetricProducerTestHelper::createValueProducerNoConditions(
+ pullerManager, metric, /*pullAtomId=*/-1);
+
+ // Sample size is not added automatically for aggregation types other than ValueMetric::AVG.
+ metric.set_aggregation_type(ValueMetric::SUM);
+ sp<NumericValueMetricProducer> valueProducerSum =
+ NumericValueMetricProducerTestHelper::createValueProducerNoConditions(
+ pullerManager, metric, /*pullAtomId=*/-1);
+
+ // Sample size is added when include_sample_size is set to true (see the sketch after this test).
+ metric.set_include_sample_size(true);
+ sp<NumericValueMetricProducer> valueProducerSumWithSampleSize =
+ NumericValueMetricProducerTestHelper::createValueProducerNoConditions(
+ pullerManager, metric, /*pullAtomId=*/-1);
+
+ LogEvent event1(/*uid=*/0, /*pid=*/0);
+ LogEvent event2(/*uid=*/0, /*pid=*/0);
+ LogEvent event3(/*uid=*/0, /*pid=*/0);
+ CreateRepeatedValueLogEvent(&event1, tagId, bucketStartTimeNs + 10, 10);
+ CreateRepeatedValueLogEvent(&event2, tagId, bucketStartTimeNs + 20, 15);
+ CreateRepeatedValueLogEvent(&event3, tagId, bucketStartTimeNs + 20, 20);
+ valueProducerAvg->onMatchedLogEvent(1 /*log matcher index*/, event1);
+ valueProducerAvg->onMatchedLogEvent(1 /*log matcher index*/, event2);
+ valueProducerSum->onMatchedLogEvent(1 /*log matcher index*/, event1);
+ valueProducerSum->onMatchedLogEvent(1 /*log matcher index*/, event2);
+ valueProducerSum->onMatchedLogEvent(1 /*log matcher index*/, event3);
+ valueProducerSumWithSampleSize->onMatchedLogEvent(1 /*log matcher index*/, event1);
+ valueProducerSumWithSampleSize->onMatchedLogEvent(1 /*log matcher index*/, event2);
+ valueProducerSumWithSampleSize->onMatchedLogEvent(1 /*log matcher index*/, event3);
+
+ NumericValueMetricProducer::Interval curInterval;
+ ASSERT_EQ(1UL, valueProducerAvg->mCurrentSlicedBucket.size());
+ curInterval = valueProducerAvg->mCurrentSlicedBucket.begin()->second.intervals[0];
+ EXPECT_EQ(2, curInterval.sampleSize);
+ ASSERT_EQ(1UL, valueProducerSum->mCurrentSlicedBucket.size());
+ curInterval = valueProducerSum->mCurrentSlicedBucket.begin()->second.intervals[0];
+ EXPECT_EQ(3, curInterval.sampleSize);
+ ASSERT_EQ(1UL, valueProducerSumWithSampleSize->mCurrentSlicedBucket.size());
+ curInterval = valueProducerSumWithSampleSize->mCurrentSlicedBucket.begin()->second.intervals[0];
+ EXPECT_EQ(3, curInterval.sampleSize);
+
+ valueProducerAvg->flushIfNeededLocked(bucket2StartTimeNs);
+ valueProducerSum->flushIfNeededLocked(bucket2StartTimeNs);
+ valueProducerSumWithSampleSize->flushIfNeededLocked(bucket2StartTimeNs);
+
+ // Start dump report and check output.
+ ProtoOutputStream outputAvg;
+ std::set<string> strSetAvg;
+ valueProducerAvg->onDumpReport(bucket2StartTimeNs + 50 * NS_PER_SEC,
+ true /* include recent buckets */, true, NO_TIME_CONSTRAINTS,
+ &strSetAvg, &outputAvg);
+
+ StatsLogReport reportAvg = outputStreamToProto(&outputAvg);
+ ASSERT_EQ(1, reportAvg.value_metrics().data_size());
+
+ ValueMetricData data = reportAvg.value_metrics().data(0);
+ ASSERT_EQ(1, data.bucket_info_size());
+ ASSERT_EQ(1, data.bucket_info(0).values_size());
+ EXPECT_EQ(2, data.bucket_info(0).values(0).sample_size());
+ EXPECT_TRUE(std::abs(data.bucket_info(0).values(0).value_double() - 12.5) < epsilon);
+
+ // Start dump report and check output.
+ ProtoOutputStream outputSum;
+ std::set<string> strSetSum;
+ valueProducerSum->onDumpReport(bucket2StartTimeNs + 50 * NS_PER_SEC,
+ true /* include recent buckets */, true, NO_TIME_CONSTRAINTS,
+ &strSetSum, &outputSum);
+
+ StatsLogReport reportSum = outputStreamToProto(&outputSum);
+ ASSERT_EQ(1, reportSum.value_metrics().data_size());
+
+ data = reportSum.value_metrics().data(0);
+ ASSERT_EQ(1, data.bucket_info_size());
+ ASSERT_EQ(1, data.bucket_info(0).values_size());
+ EXPECT_EQ(45, data.bucket_info(0).values(0).value_long());
+ EXPECT_FALSE(data.bucket_info(0).values(0).has_sample_size());
+
+ // Start dump report and check output.
+ ProtoOutputStream outputSumWithSampleSize;
+ std::set<string> strSetSumWithSampleSize;
+ valueProducerSumWithSampleSize->onDumpReport(
+ bucket2StartTimeNs + 50 * NS_PER_SEC, true /* include recent buckets */, true,
+ NO_TIME_CONSTRAINTS, &strSetSumWithSampleSize, &outputSumWithSampleSize);
+
+ StatsLogReport reportSumWithSampleSize = outputStreamToProto(&outputSumWithSampleSize);
+ ASSERT_EQ(1, reportSumWithSampleSize.value_metrics().data_size());
+
+ data = reportSumWithSampleSize.value_metrics().data(0);
+ ASSERT_EQ(1, data.bucket_info_size());
+ ASSERT_EQ(1, data.bucket_info(0).values_size());
+ EXPECT_EQ(3, data.bucket_info(0).values(0).sample_size());
+ EXPECT_EQ(45, data.bucket_info(0).values(0).value_long());
+}
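+
+// Illustrative sketch, not part of the test above: when include_sample_size is set on a SUM
+// metric, a report consumer can recover the per-bucket mean by dividing the summed value by
+// the reported sample_size. The helper name is hypothetical.
+[[maybe_unused]] static double bucketMeanFromSum(int64_t valueLong, int32_t sampleSize) {
+ // For the SUM bucket verified above this is 45 / 3 == 15.0, whereas the AVG producer
+ // reports value_double == 12.5 (the mean of 10 and 15) directly.
+ return sampleSize > 0 ? static_cast<double>(valueLong) / sampleSize : 0.0;
+}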
+
+TEST(NumericValueMetricProducerTest, TestDimensionalSampling) {
+ ShardOffsetProvider::getInstance().setShardOffset(5);
+
+ int shardCount = 2;
+ ValueMetric sampledValueMetric = NumericValueMetricProducerTestHelper::createMetric();
+ *sampledValueMetric.mutable_dimensions_in_what() = CreateDimensions(tagId, {1 /*uid*/});
+ *sampledValueMetric.mutable_dimensional_sampling_info()->mutable_sampled_what_field() =
+ CreateDimensions(tagId, {1 /*uid*/});
+ sampledValueMetric.mutable_dimensional_sampling_info()->set_shard_count(shardCount);
+
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
+
+ EXPECT_CALL(*pullerManager, Pull(tagId, kConfigKey, _, _))
+ // First field is a dimension field and sampled what field.
+ // Second field is the value field.
+ // NumericValueMetricProducer initialized.
+ .WillOnce(Invoke([](int tagId, const ConfigKey&, const int64_t eventTimeNs,
+ vector<std::shared_ptr<LogEvent>>* data) {
+ data->clear();
+ data->push_back(makeUidLogEvent(tagId, bucketStartTimeNs + 1, 1001, 5, 10));
+ data->push_back(makeUidLogEvent(tagId, bucketStartTimeNs + 1, 1002, 10, 10));
+ data->push_back(makeUidLogEvent(tagId, bucketStartTimeNs + 1, 1003, 15, 10));
+ return true;
+ }))
+ // Dump report pull.
+ .WillOnce(Invoke([](int tagId, const ConfigKey&, const int64_t eventTimeNs,
+ vector<std::shared_ptr<LogEvent>>* data) {
+ data->clear();
+ data->push_back(
+ makeUidLogEvent(tagId, bucketStartTimeNs + 10000000000, 1001, 6, 10));
+ data->push_back(
+ makeUidLogEvent(tagId, bucketStartTimeNs + 10000000000, 1002, 12, 10));
+ data->push_back(
+ makeUidLogEvent(tagId, bucketStartTimeNs + 10000000000, 1003, 18, 10));
+ return true;
+ }));
+
+ sp<NumericValueMetricProducer> valueProducer =
+ NumericValueMetricProducerTestHelper::createValueProducerWithSampling(
+ pullerManager, sampledValueMetric);
+
+ // Check dump report.
+ ProtoOutputStream output;
+ std::set<string> strSet;
+ int64_t dumpReportTimeNs = bucketStartTimeNs + 10000000000;
+ valueProducer->onDumpReport(dumpReportTimeNs, true /* include current buckets */, true,
+ NO_TIME_CONSTRAINTS /* dumpLatency */, &strSet, &output);
+
+ StatsLogReport report = outputStreamToProto(&output);
+ backfillDimensionPath(&report);
+ backfillStartEndTimestamp(&report);
+ EXPECT_TRUE(report.has_value_metrics());
+ StatsLogReport::ValueMetricDataWrapper valueMetrics;
+ sortMetricDataByDimensionsValue(report.value_metrics(), &valueMetrics);
+ ASSERT_EQ(2, valueMetrics.data_size());
+ EXPECT_EQ(0, report.value_metrics().skipped_size());
+
+ // Only uids 1001 and 1003 are logged: (odd hash value + offset of 5) % (shard count of 2) == 0
+ // puts them on the sampled shard (see the sketch after this test).
+ ValueMetricData data = valueMetrics.data(0);
+ ValidateUidDimension(data.dimensions_in_what(), tagId, 1001);
+ ASSERT_EQ(1, data.bucket_info_size());
+ ValidateValueBucket(data.bucket_info(0), bucketStartTimeNs, bucketStartTimeNs + 10000000000,
+ {1}, -1,
+ 0); // Diff of 5 and 6
+
+ data = valueMetrics.data(1);
+ ValidateUidDimension(data.dimensions_in_what(), tagId, 1003);
+ ASSERT_EQ(1, data.bucket_info_size());
+ ValidateValueBucket(data.bucket_info(0), bucketStartTimeNs, bucketStartTimeNs + 10000000000,
+ {3}, -1,
+ 0); // Diff of 15 and 18
+}
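+
+// Illustrative sketch, not exercised directly: dimensional sampling keeps a dimension key when
+// the hash of the sampled_what_field, shifted by the shard offset, lands on shard 0. The helper
+// and its plain integer hash argument are assumptions for illustration only.
+[[maybe_unused]] static bool isKeptBySampling(uint64_t sampledFieldHash, uint64_t shardOffset,
+ uint64_t shardCount) {
+ // Assumes shardCount > 0. With shardOffset == 5 and shardCount == 2, only keys with an odd
+ // hash land on shard 0, matching the expectations above (uids 1001 and 1003 reported,
+ // 1002 dropped).
+ return (sampledFieldHash + shardOffset) % shardCount == 0;
+}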
+
} // namespace statsd
} // namespace os
} // namespace android
diff --git a/statsd/tests/metrics/OringDurationTracker_test.cpp b/statsd/tests/metrics/OringDurationTracker_test.cpp
index 369f473..a4722ca 100644
--- a/statsd/tests/metrics/OringDurationTracker_test.cpp
+++ b/statsd/tests/metrics/OringDurationTracker_test.cpp
@@ -202,7 +202,7 @@
tracker.noteStart(kEventKey1, true, eventStartTimeNs, key1);
- tracker.onSlicedConditionMayChange(true, eventStartTimeNs + 5);
+ tracker.onSlicedConditionMayChange(eventStartTimeNs + 5);
tracker.noteStop(kEventKey1, eventStartTimeNs + durationTimeNs, false);
@@ -241,9 +241,9 @@
tracker.noteStart(kEventKey1, true, eventStartTimeNs, key1);
// condition to false; record duration 5n
- tracker.onSlicedConditionMayChange(true, eventStartTimeNs + 5);
+ tracker.onSlicedConditionMayChange(eventStartTimeNs + 5);
// condition to true.
- tracker.onSlicedConditionMayChange(true, eventStartTimeNs + 1000);
+ tracker.onSlicedConditionMayChange(eventStartTimeNs + 1000);
// 2nd duration: 1000ns
tracker.noteStop(kEventKey1, eventStartTimeNs + durationTimeNs, false);
@@ -281,7 +281,7 @@
tracker.noteStop(kEventKey1, eventStartTimeNs + 3, false);
- tracker.onSlicedConditionMayChange(true, eventStartTimeNs + 15);
+ tracker.onSlicedConditionMayChange(eventStartTimeNs + 15);
tracker.noteStop(kEventKey1, eventStartTimeNs + 2003, false);
diff --git a/statsd/tests/metrics/RestrictedEventMetricProducer_test.cpp b/statsd/tests/metrics/RestrictedEventMetricProducer_test.cpp
new file mode 100644
index 0000000..b6c4545
--- /dev/null
+++ b/statsd/tests/metrics/RestrictedEventMetricProducer_test.cpp
@@ -0,0 +1,266 @@
+#include "src/metrics/RestrictedEventMetricProducer.h"
+
+#include <android-modules-utils/sdk_level.h>
+#include <gtest/gtest.h>
+
+#include "flags/FlagProvider.h"
+#include "metrics_test_helper.h"
+#include "stats_annotations.h"
+#include "tests/statsd_test_util.h"
+#include "utils/DbUtils.h"
+
+using namespace testing;
+using std::string;
+using std::stringstream;
+using std::vector;
+
+#ifdef __ANDROID__
+
+namespace android {
+namespace os {
+namespace statsd {
+
+using android::modules::sdklevel::IsAtLeastU;
+
+namespace {
+const ConfigKey configKey(/*uid=*/0, /*id=*/12345);
+const int64_t metricId1 = 123;
+const int64_t metricId2 = 456;
+
+bool metricTableExist(int64_t metricId) {
+ stringstream query;
+ query << "SELECT * FROM metric_" << metricId;
+ vector<int32_t> columnTypes;
+ vector<vector<string>> rows;
+ vector<string> columnNames;
+ string err;
+ return dbutils::query(configKey, query.str(), rows, columnTypes, columnNames, err);
+}
+} // anonymous namespace
+
+class RestrictedEventMetricProducerTest : public Test {
+protected:
+ void SetUp() override {
+ if (!IsAtLeastU()) {
+ GTEST_SKIP();
+ }
+ }
+ void TearDown() override {
+ if (!IsAtLeastU()) {
+ GTEST_SKIP();
+ }
+ dbutils::deleteDb(configKey);
+ FlagProvider::getInstance().resetOverrides();
+ }
+};
+
+TEST_F(RestrictedEventMetricProducerTest, TestOnMatchedLogEventMultipleEvents) {
+ EventMetric metric;
+ metric.set_id(metricId1);
+ RestrictedEventMetricProducer producer(configKey, metric,
+ /*conditionIndex=*/-1,
+ /*initialConditionCache=*/{}, new ConditionWizard(),
+ /*protoHash=*/0x1234567890,
+ /*startTimeNs=*/0);
+ std::unique_ptr<LogEvent> event1 = CreateRestrictedLogEvent(/*atomTag=*/123, /*timestampNs=*/1);
+ std::unique_ptr<LogEvent> event2 = CreateRestrictedLogEvent(/*atomTag=*/123, /*timestampNs=*/3);
+
+ producer.onMatchedLogEvent(/*matcherIndex=*/1, *event1);
+ producer.onMatchedLogEvent(/*matcherIndex=*/1, *event2);
+ producer.flushRestrictedData();
+
+ stringstream query;
+ query << "SELECT * FROM metric_" << metricId1;
+ string err;
+ vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ vector<vector<string>> rows;
+ dbutils::query(configKey, query.str(), rows, columnTypes, columnNames, err);
+ ASSERT_EQ(rows.size(), 2);
+ EXPECT_EQ(columnTypes.size(),
+ 3 + event1->getValues().size()); // columns 0-2 are reserved for metadata.
+ EXPECT_EQ(/*tagId=*/rows[0][0], to_string(event1->GetTagId()));
+ EXPECT_EQ(/*elapsedTimestampNs=*/rows[0][1], to_string(event1->GetElapsedTimestampNs()));
+ EXPECT_EQ(/*elapsedTimestampNs=*/rows[1][1], to_string(event2->GetElapsedTimestampNs()));
+
+ EXPECT_THAT(columnNames,
+ ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs", "field_1"));
+}
+
+TEST_F(RestrictedEventMetricProducerTest, TestOnMatchedLogEventMultipleFields) {
+ EventMetric metric;
+ metric.set_id(metricId2);
+ RestrictedEventMetricProducer producer(configKey, metric,
+ /*conditionIndex=*/-1,
+ /*initialConditionCache=*/{}, new ConditionWizard(),
+ /*protoHash=*/0x1234567890,
+ /*startTimeNs=*/0);
+ AStatsEvent* statsEvent = AStatsEvent_obtain();
+ AStatsEvent_setAtomId(statsEvent, 1);
+ AStatsEvent_addInt32Annotation(statsEvent, ASTATSLOG_ANNOTATION_ID_RESTRICTION_CATEGORY,
+ ASTATSLOG_RESTRICTION_CATEGORY_DIAGNOSTIC);
+ AStatsEvent_overwriteTimestamp(statsEvent, 1);
+
+ AStatsEvent_writeString(statsEvent, "111");
+ AStatsEvent_writeInt32(statsEvent, 11);
+ AStatsEvent_writeFloat(statsEvent, 11.0);
+ LogEvent logEvent(/*uid=*/0, /*pid=*/0);
+ parseStatsEventToLogEvent(statsEvent, &logEvent);
+
+ producer.onMatchedLogEvent(/*matcherIndex=*/1, logEvent);
+ producer.flushRestrictedData();
+
+ stringstream query;
+ query << "SELECT * FROM metric_" << metricId2;
+ string err;
+ vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ vector<vector<string>> rows;
+ EXPECT_TRUE(dbutils::query(configKey, query.str(), rows, columnTypes, columnNames, err));
+ ASSERT_EQ(rows.size(), 1);
+ EXPECT_EQ(columnTypes.size(),
+ 3 + logEvent.getValues().size()); // columns 0-2 are reserved for metadata.
+ EXPECT_EQ(/*field1=*/rows[0][3], "111");
+ EXPECT_EQ(/*field2=*/rows[0][4], "11");
+ EXPECT_FLOAT_EQ(/*field3=*/std::stof(rows[0][5]), 11.0);
+
+ EXPECT_THAT(columnNames, ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs",
+ "field_1", "field_2", "field_3"));
+}
+
+TEST_F(RestrictedEventMetricProducerTest, TestOnMatchedLogEventWithCondition) {
+ EventMetric metric;
+ metric.set_id(metricId1);
+ metric.set_condition(StringToId("SCREEN_ON"));
+ RestrictedEventMetricProducer producer(configKey, metric,
+ /*conditionIndex=*/0,
+ /*initialConditionCache=*/{ConditionState::kUnknown},
+ new ConditionWizard(),
+ /*protoHash=*/0x1234567890,
+ /*startTimeNs=*/0);
+ std::unique_ptr<LogEvent> event1 = CreateRestrictedLogEvent(/*atomTag=*/123, /*timestampNs=*/1);
+ std::unique_ptr<LogEvent> event2 = CreateRestrictedLogEvent(/*atomTag=*/123, /*timestampNs=*/3);
+
+ producer.onConditionChanged(true, 0);
+ producer.onMatchedLogEvent(/*matcherIndex=*/1, *event1);
+ producer.onConditionChanged(false, 1);
+ producer.onMatchedLogEvent(/*matcherIndex=*/1, *event2);
+ producer.flushRestrictedData();
+
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << metricId1;
+ string err;
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ dbutils::query(configKey, query.str(), rows, columnTypes, columnNames, err);
+ ASSERT_EQ(rows.size(), 1);
+ EXPECT_EQ(columnTypes.size(), 3 + event1->getValues().size());
+ EXPECT_EQ(/*elapsedTimestampNs=*/rows[0][1], to_string(event1->GetElapsedTimestampNs()));
+
+ EXPECT_THAT(columnNames,
+ ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs", "field_1"));
+}
+
+TEST_F(RestrictedEventMetricProducerTest, TestOnDumpReportNoOp) {
+ EventMetric metric;
+ metric.set_id(metricId1);
+ RestrictedEventMetricProducer producer(configKey, metric,
+ /*conditionIndex=*/-1,
+ /*initialConditionCache=*/{}, new ConditionWizard(),
+ /*protoHash=*/0x1234567890,
+ /*startTimeNs=*/0);
+ std::unique_ptr<LogEvent> event1 = CreateRestrictedLogEvent(/*timestampNs=*/1);
+ producer.onMatchedLogEvent(/*matcherIndex=*/1, *event1);
+ ProtoOutputStream output;
+ std::set<string> strSet;
+ producer.onDumpReport(/*dumpTimeNs=*/10,
+ /*include_current_partial_bucket=*/true,
+ /*erase_data=*/true, FAST, &strSet, &output);
+
+ ASSERT_EQ(output.size(), 0);
+ ASSERT_EQ(strSet.size(), 0);
+}
+
+TEST_F(RestrictedEventMetricProducerTest, TestOnMetricRemove) {
+ EventMetric metric;
+ metric.set_id(metricId1);
+ RestrictedEventMetricProducer producer(configKey, metric,
+ /*conditionIndex=*/-1,
+ /*initialConditionCache=*/{}, new ConditionWizard(),
+ /*protoHash=*/0x1234567890,
+ /*startTimeNs=*/0);
+ EXPECT_FALSE(metricTableExist(metricId1));
+
+ std::unique_ptr<LogEvent> event1 = CreateRestrictedLogEvent(/*timestampNs=*/1);
+ producer.onMatchedLogEvent(/*matcherIndex=*/1, *event1);
+ producer.flushRestrictedData();
+ EXPECT_TRUE(metricTableExist(metricId1));
+
+ producer.onMetricRemove();
+ EXPECT_FALSE(metricTableExist(metricId1));
+}
+
+TEST_F(RestrictedEventMetricProducerTest, TestRestrictedEventMetricTtlDeletesFirstEvent) {
+ EventMetric metric;
+ metric.set_id(metricId1);
+ RestrictedEventMetricProducer producer(configKey, metric,
+ /*conditionIndex=*/-1,
+ /*initialConditionCache=*/{}, new ConditionWizard(),
+ /*protoHash=*/0x1234567890,
+ /*startTimeNs=*/0);
+
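+ // event1 is stamped eight days in the past so enforceRestrictedDataTtl() below should delete
+ // it, leaving only event2 in the table (see the sketch after this test).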
+ int64_t currentTimeNs = getWallClockNs();
+ int64_t eightDaysAgo = currentTimeNs - 8 * 24 * 3600 * NS_PER_SEC;
+ std::unique_ptr<LogEvent> event1 = CreateRestrictedLogEvent(/*atomTag=*/123, /*timestampNs=*/1);
+ event1->setLogdWallClockTimestampNs(eightDaysAgo);
+ std::unique_ptr<LogEvent> event2 = CreateRestrictedLogEvent(/*atomTag=*/123, /*timestampNs=*/3);
+ event2->setLogdWallClockTimestampNs(currentTimeNs);
+
+ producer.onMatchedLogEvent(/*matcherIndex=*/1, *event1);
+ producer.onMatchedLogEvent(/*matcherIndex=*/1, *event2);
+ producer.flushRestrictedData();
+ sqlite3* dbHandle = dbutils::getDb(configKey);
+ producer.enforceRestrictedDataTtl(dbHandle, currentTimeNs + 100);
+ dbutils::closeDb(dbHandle);
+
+ std::stringstream query;
+ query << "SELECT * FROM metric_" << metricId1;
+ string err;
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ dbutils::query(configKey, query.str(), rows, columnTypes, columnNames, err);
+ ASSERT_EQ(rows.size(), 1);
+ EXPECT_EQ(columnTypes.size(), 3 + event1->getValues().size());
+ EXPECT_THAT(columnNames,
+ ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs", "field_1"));
+ EXPECT_THAT(rows[0], ElementsAre(to_string(event2->GetTagId()),
+ to_string(event2->GetElapsedTimestampNs()),
+ to_string(currentTimeNs), _));
+}
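+
+// Illustrative sketch, not exercised directly: TTL enforcement drops a row when its wall-clock
+// timestamp falls outside the TTL window measured from the enforcement time. The helper and the
+// exact TTL value are assumptions; the test above only relies on an eight-day-old event expiring.
+[[maybe_unused]] static bool isExpiredByTtl(int64_t wallTimestampNs, int64_t enforcementTimeNs,
+ int64_t ttlNs) {
+ return wallTimestampNs < enforcementTimeNs - ttlNs;
+}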
+
+TEST_F(RestrictedEventMetricProducerTest, TestLoadMetricMetadataSetsCategory) {
+ metadata::MetricMetadata metricMetadata;
+ metricMetadata.set_metric_id(metricId1);
+ metricMetadata.set_restricted_category(1); // CATEGORY_DIAGNOSTIC
+ EventMetric metric;
+ metric.set_id(metricId1);
+ RestrictedEventMetricProducer producer(configKey, metric,
+ /*conditionIndex=*/-1,
+ /*initialConditionCache=*/{}, new ConditionWizard(),
+ /*protoHash=*/0x1234567890,
+ /*startTimeNs=*/0);
+
+ producer.loadMetricMetadataFromProto(metricMetadata);
+
+ EXPECT_EQ(producer.getRestrictionCategory(), CATEGORY_DIAGNOSTIC);
+}
+
+} // namespace statsd
+} // namespace os
+} // namespace android
+
+#else
+GTEST_LOG_(INFO) << "This test does nothing.\n";
+#endif
\ No newline at end of file
diff --git a/statsd/tests/metrics/parsing_utils/config_update_utils_test.cpp b/statsd/tests/metrics/parsing_utils/config_update_utils_test.cpp
index b9eb28e..d7374a5 100644
--- a/statsd/tests/metrics/parsing_utils/config_update_utils_test.cpp
+++ b/statsd/tests/metrics/parsing_utils/config_update_utils_test.cpp
@@ -25,7 +25,6 @@
#include "src/condition/CombinationConditionTracker.h"
#include "src/condition/SimpleConditionTracker.h"
-#include "src/flags/FlagProvider.h"
#include "src/matchers/CombinationAtomMatchingTracker.h"
#include "src/metrics/DurationMetricProducer.h"
#include "src/metrics/GaugeMetricProducer.h"
@@ -63,41 +62,59 @@
/*minDiffToUpdateRegisteredAlarmTimeSec=*/0,
[](const shared_ptr<IStatsCompanionService>&, int64_t) {},
[](const shared_ptr<IStatsCompanionService>&) {});
-set<int> allTagIds;
+unordered_map<int, vector<int>> allTagIdsToMatchersMap;
vector<sp<AtomMatchingTracker>> oldAtomMatchingTrackers;
unordered_map<int64_t, int> oldAtomMatchingTrackerMap;
vector<sp<ConditionTracker>> oldConditionTrackers;
unordered_map<int64_t, int> oldConditionTrackerMap;
vector<sp<MetricProducer>> oldMetricProducers;
unordered_map<int64_t, int> oldMetricProducerMap;
-std::vector<sp<AnomalyTracker>> oldAnomalyTrackers;
+vector<sp<AnomalyTracker>> oldAnomalyTrackers;
unordered_map<int64_t, int> oldAlertTrackerMap;
-std::vector<sp<AlarmTracker>> oldAlarmTrackers;
-unordered_map<int, std::vector<int>> tmpConditionToMetricMap;
-unordered_map<int, std::vector<int>> tmpTrackerToMetricMap;
-unordered_map<int, std::vector<int>> tmpTrackerToConditionMap;
-unordered_map<int, std::vector<int>> tmpActivationAtomTrackerToMetricMap;
-unordered_map<int, std::vector<int>> tmpDeactivationAtomTrackerToMetricMap;
+vector<sp<AlarmTracker>> oldAlarmTrackers;
+unordered_map<int, vector<int>> tmpConditionToMetricMap;
+unordered_map<int, vector<int>> tmpTrackerToMetricMap;
+unordered_map<int, vector<int>> tmpTrackerToConditionMap;
+unordered_map<int, vector<int>> tmpActivationAtomTrackerToMetricMap;
+unordered_map<int, vector<int>> tmpDeactivationAtomTrackerToMetricMap;
vector<int> metricsWithActivation;
map<int64_t, uint64_t> oldStateHashes;
-std::set<int64_t> noReportMetricIds;
+set<int64_t> noReportMetricIds;
bool initConfig(const StatsdConfig& config) {
- return initStatsdConfig(
- key, config, uidMap, pullerManager, anomalyAlarmMonitor, periodicAlarmMonitor,
- timeBaseNs, timeBaseNs, allTagIds, oldAtomMatchingTrackers, oldAtomMatchingTrackerMap,
- oldConditionTrackers, oldConditionTrackerMap, oldMetricProducers, oldMetricProducerMap,
- oldAnomalyTrackers, oldAlarmTrackers, tmpConditionToMetricMap, tmpTrackerToMetricMap,
- tmpTrackerToConditionMap, tmpActivationAtomTrackerToMetricMap,
- tmpDeactivationAtomTrackerToMetricMap, oldAlertTrackerMap, metricsWithActivation,
- oldStateHashes, noReportMetricIds);
+ // initStatsdConfig returns nullopt if config is valid
+ return !initStatsdConfig(
+ key, config, uidMap, pullerManager, anomalyAlarmMonitor, periodicAlarmMonitor,
+ timeBaseNs, timeBaseNs, allTagIdsToMatchersMap, oldAtomMatchingTrackers,
+ oldAtomMatchingTrackerMap, oldConditionTrackers, oldConditionTrackerMap,
+ oldMetricProducers, oldMetricProducerMap, oldAnomalyTrackers, oldAlarmTrackers,
+ tmpConditionToMetricMap, tmpTrackerToMetricMap, tmpTrackerToConditionMap,
+ tmpActivationAtomTrackerToMetricMap, tmpDeactivationAtomTrackerToMetricMap,
+ oldAlertTrackerMap, metricsWithActivation, oldStateHashes, noReportMetricIds)
+ .has_value();
}
+
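+// Maps each matcher id in |ids| to its index within |atomMatchingTrackers|; used below to
+// check the tag-id-to-matcher-indexes map without hard-coding tracker positions.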
+vector<int> filterMatcherIndexesById(const vector<sp<AtomMatchingTracker>>& atomMatchingTrackers,
+ const vector<int64_t>& ids) {
+ vector<int> result;
+
+ for (auto& id : ids) {
+ for (int i = 0; i < atomMatchingTrackers.size(); i++) {
+ if (atomMatchingTrackers[i]->getId() == id) {
+ result.push_back(i);
+ }
+ }
+ }
+
+ return result;
+}
+
} // anonymous namespace
class ConfigUpdateTest : public ::testing::Test {
public:
void SetUp() override {
- allTagIds.clear();
+ allTagIdsToMatchersMap.clear();
oldAtomMatchingTrackers.clear();
oldAtomMatchingTrackerMap.clear();
oldConditionTrackers.clear();
@@ -116,11 +133,6 @@
oldStateHashes.clear();
noReportMetricIds.clear();
StateManager::getInstance().clear();
- FlagProvider::getInstance().overrideFuncs(&isAtLeastSFuncTrue, &getServerFlagFuncTrue);
- }
-
- void TearDown() override {
- FlagProvider::getInstance().resetOverrides();
}
};
@@ -137,9 +149,10 @@
vector<bool> cycleTracker(1, false);
unordered_map<int64_t, int> newAtomMatchingTrackerMap;
newAtomMatchingTrackerMap[matcherId] = 0;
- EXPECT_TRUE(determineMatcherUpdateStatus(config, 0, oldAtomMatchingTrackerMap,
- oldAtomMatchingTrackers, newAtomMatchingTrackerMap,
- matchersToUpdate, cycleTracker));
+ EXPECT_EQ(determineMatcherUpdateStatus(config, 0, oldAtomMatchingTrackerMap,
+ oldAtomMatchingTrackers, newAtomMatchingTrackerMap,
+ matchersToUpdate, cycleTracker),
+ nullopt);
EXPECT_EQ(matchersToUpdate[0], UPDATE_PRESERVE);
}
@@ -161,9 +174,10 @@
vector<bool> cycleTracker(1, false);
unordered_map<int64_t, int> newAtomMatchingTrackerMap;
newAtomMatchingTrackerMap[matcherId] = 0;
- EXPECT_TRUE(determineMatcherUpdateStatus(newConfig, 0, oldAtomMatchingTrackerMap,
- oldAtomMatchingTrackers, newAtomMatchingTrackerMap,
- matchersToUpdate, cycleTracker));
+ EXPECT_EQ(determineMatcherUpdateStatus(newConfig, 0, oldAtomMatchingTrackerMap,
+ oldAtomMatchingTrackers, newAtomMatchingTrackerMap,
+ matchersToUpdate, cycleTracker),
+ nullopt);
EXPECT_EQ(matchersToUpdate[0], UPDATE_REPLACE);
}
@@ -185,9 +199,10 @@
vector<bool> cycleTracker(1, false);
unordered_map<int64_t, int> newAtomMatchingTrackerMap;
newAtomMatchingTrackerMap[matcherId] = 0;
- EXPECT_TRUE(determineMatcherUpdateStatus(newConfig, 0, oldAtomMatchingTrackerMap,
- oldAtomMatchingTrackers, newAtomMatchingTrackerMap,
- matchersToUpdate, cycleTracker));
+ EXPECT_EQ(determineMatcherUpdateStatus(newConfig, 0, oldAtomMatchingTrackerMap,
+ oldAtomMatchingTrackers, newAtomMatchingTrackerMap,
+ matchersToUpdate, cycleTracker),
+ nullopt);
EXPECT_EQ(matchersToUpdate[0], UPDATE_NEW);
}
@@ -225,9 +240,10 @@
vector<UpdateStatus> matchersToUpdate(3, UPDATE_UNKNOWN);
vector<bool> cycleTracker(3, false);
// Only update the combination. It should recurse the two child matchers and preserve all 3.
- EXPECT_TRUE(determineMatcherUpdateStatus(newConfig, 1, oldAtomMatchingTrackerMap,
- oldAtomMatchingTrackers, newAtomMatchingTrackerMap,
- matchersToUpdate, cycleTracker));
+ EXPECT_EQ(determineMatcherUpdateStatus(newConfig, 1, oldAtomMatchingTrackerMap,
+ oldAtomMatchingTrackers, newAtomMatchingTrackerMap,
+ matchersToUpdate, cycleTracker),
+ nullopt);
EXPECT_EQ(matchersToUpdate[0], UPDATE_PRESERVE);
EXPECT_EQ(matchersToUpdate[1], UPDATE_PRESERVE);
EXPECT_EQ(matchersToUpdate[2], UPDATE_PRESERVE);
@@ -269,9 +285,10 @@
vector<UpdateStatus> matchersToUpdate(3, UPDATE_UNKNOWN);
vector<bool> cycleTracker(3, false);
// Only update the combination. The simple matchers should not be evaluated.
- EXPECT_TRUE(determineMatcherUpdateStatus(newConfig, 1, oldAtomMatchingTrackerMap,
- oldAtomMatchingTrackers, newAtomMatchingTrackerMap,
- matchersToUpdate, cycleTracker));
+ EXPECT_EQ(determineMatcherUpdateStatus(newConfig, 1, oldAtomMatchingTrackerMap,
+ oldAtomMatchingTrackers, newAtomMatchingTrackerMap,
+ matchersToUpdate, cycleTracker),
+ nullopt);
EXPECT_EQ(matchersToUpdate[0], UPDATE_UNKNOWN);
EXPECT_EQ(matchersToUpdate[1], UPDATE_REPLACE);
EXPECT_EQ(matchersToUpdate[2], UPDATE_UNKNOWN);
@@ -313,9 +330,10 @@
vector<UpdateStatus> matchersToUpdate(3, UPDATE_UNKNOWN);
vector<bool> cycleTracker(3, false);
// Only update the combination.
- EXPECT_TRUE(determineMatcherUpdateStatus(newConfig, 1, oldAtomMatchingTrackerMap,
- oldAtomMatchingTrackers, newAtomMatchingTrackerMap,
- matchersToUpdate, cycleTracker));
+ EXPECT_EQ(determineMatcherUpdateStatus(newConfig, 1, oldAtomMatchingTrackerMap,
+ oldAtomMatchingTrackers, newAtomMatchingTrackerMap,
+ matchersToUpdate, cycleTracker),
+ nullopt);
// Matcher 2 and matcher3 must be reevaluated. Matcher 1 might, but does not need to be.
EXPECT_EQ(matchersToUpdate[0], UPDATE_REPLACE);
EXPECT_EQ(matchersToUpdate[1], UPDATE_REPLACE);
@@ -382,19 +400,34 @@
*newConfig.add_atom_matcher() = simple4;
*newConfig.add_atom_matcher() = combination1;
- set<int> newTagIds;
+ unordered_map<int, vector<int>> newTagIds;
unordered_map<int64_t, int> newAtomMatchingTrackerMap;
vector<sp<AtomMatchingTracker>> newAtomMatchingTrackers;
set<int64_t> replacedMatchers;
- EXPECT_TRUE(updateAtomMatchingTrackers(
- newConfig, uidMap, oldAtomMatchingTrackerMap, oldAtomMatchingTrackers, newTagIds,
- newAtomMatchingTrackerMap, newAtomMatchingTrackers, replacedMatchers));
+ EXPECT_EQ(updateAtomMatchingTrackers(newConfig, uidMap, oldAtomMatchingTrackerMap,
+ oldAtomMatchingTrackers, newTagIds,
+ newAtomMatchingTrackerMap, newAtomMatchingTrackers,
+ replacedMatchers),
+ nullopt);
ASSERT_EQ(newTagIds.size(), 3);
EXPECT_EQ(newTagIds.count(10), 1);
EXPECT_EQ(newTagIds.count(111), 1);
EXPECT_EQ(newTagIds.count(13), 1);
+ EXPECT_EQ(newTagIds[10].size(), 3); // simple1, combination1, combination2
+ EXPECT_THAT(newTagIds[10], UnorderedElementsAreArray(filterMatcherIndexesById(
+ newAtomMatchingTrackers,
+ {simple1.id(), combination1.id(), combination2.id()})));
+ EXPECT_EQ(newTagIds[111].size(), 3); // simple2, combination2, combination3
+ EXPECT_THAT(newTagIds[111], UnorderedElementsAreArray(filterMatcherIndexesById(
+ newAtomMatchingTrackers,
+ {simple2.id(), combination2.id(), combination3.id()})));
+ EXPECT_EQ(newTagIds[13].size(), 2); // simple4, combination3
+ EXPECT_THAT(newTagIds[13],
+ UnorderedElementsAreArray(filterMatcherIndexesById(
+ newAtomMatchingTrackers, {simple4.id(), combination3.id()})));
+
ASSERT_EQ(newAtomMatchingTrackerMap.size(), 6);
EXPECT_EQ(newAtomMatchingTrackerMap.at(combination3Id), 0);
EXPECT_EQ(newAtomMatchingTrackerMap.at(simple2Id), 1);
@@ -478,9 +511,10 @@
vector<bool> cycleTracker(1, false);
unordered_map<int64_t, int> newConditionTrackerMap;
newConditionTrackerMap[predicate.id()] = 0;
- EXPECT_TRUE(determineConditionUpdateStatus(config, 0, oldConditionTrackerMap,
- oldConditionTrackers, newConditionTrackerMap,
- replacedMatchers, conditionsToUpdate, cycleTracker));
+ EXPECT_EQ(determineConditionUpdateStatus(config, 0, oldConditionTrackerMap,
+ oldConditionTrackers, newConditionTrackerMap,
+ replacedMatchers, conditionsToUpdate, cycleTracker),
+ nullopt);
EXPECT_EQ(conditionsToUpdate[0], UPDATE_PRESERVE);
}
@@ -504,9 +538,10 @@
vector<bool> cycleTracker(1, false);
unordered_map<int64_t, int> newConditionTrackerMap;
newConditionTrackerMap[predicate.id()] = 0;
- EXPECT_TRUE(determineConditionUpdateStatus(config, 0, oldConditionTrackerMap,
- oldConditionTrackers, newConditionTrackerMap,
- replacedMatchers, conditionsToUpdate, cycleTracker));
+ EXPECT_EQ(determineConditionUpdateStatus(config, 0, oldConditionTrackerMap,
+ oldConditionTrackers, newConditionTrackerMap,
+ replacedMatchers, conditionsToUpdate, cycleTracker),
+ nullopt);
EXPECT_EQ(conditionsToUpdate[0], UPDATE_REPLACE);
}
@@ -531,9 +566,10 @@
vector<bool> cycleTracker(1, false);
unordered_map<int64_t, int> newConditionTrackerMap;
newConditionTrackerMap[predicate.id()] = 0;
- EXPECT_TRUE(determineConditionUpdateStatus(config, 0, oldConditionTrackerMap,
- oldConditionTrackers, newConditionTrackerMap,
- replacedMatchers, conditionsToUpdate, cycleTracker));
+ EXPECT_EQ(determineConditionUpdateStatus(config, 0, oldConditionTrackerMap,
+ oldConditionTrackers, newConditionTrackerMap,
+ replacedMatchers, conditionsToUpdate, cycleTracker),
+ nullopt);
EXPECT_EQ(conditionsToUpdate[0], UPDATE_REPLACE);
}
@@ -573,9 +609,10 @@
vector<UpdateStatus> conditionsToUpdate(3, UPDATE_UNKNOWN);
vector<bool> cycleTracker(3, false);
// Only update the combination. It should recurse the two child predicates and preserve all 3.
- EXPECT_TRUE(determineConditionUpdateStatus(newConfig, 0, oldConditionTrackerMap,
- oldConditionTrackers, newConditionTrackerMap,
- replacedMatchers, conditionsToUpdate, cycleTracker));
+ EXPECT_EQ(determineConditionUpdateStatus(newConfig, 0, oldConditionTrackerMap,
+ oldConditionTrackers, newConditionTrackerMap,
+ replacedMatchers, conditionsToUpdate, cycleTracker),
+ nullopt);
EXPECT_EQ(conditionsToUpdate[0], UPDATE_PRESERVE);
EXPECT_EQ(conditionsToUpdate[1], UPDATE_PRESERVE);
EXPECT_EQ(conditionsToUpdate[2], UPDATE_PRESERVE);
@@ -619,9 +656,10 @@
vector<UpdateStatus> conditionsToUpdate(3, UPDATE_UNKNOWN);
vector<bool> cycleTracker(3, false);
// Only update the combination. The simple conditions should not be evaluated.
- EXPECT_TRUE(determineConditionUpdateStatus(newConfig, 0, oldConditionTrackerMap,
- oldConditionTrackers, newConditionTrackerMap,
- replacedMatchers, conditionsToUpdate, cycleTracker));
+ EXPECT_EQ(determineConditionUpdateStatus(newConfig, 0, oldConditionTrackerMap,
+ oldConditionTrackers, newConditionTrackerMap,
+ replacedMatchers, conditionsToUpdate, cycleTracker),
+ nullopt);
EXPECT_EQ(conditionsToUpdate[0], UPDATE_REPLACE);
EXPECT_EQ(conditionsToUpdate[1], UPDATE_UNKNOWN);
EXPECT_EQ(conditionsToUpdate[2], UPDATE_UNKNOWN);
@@ -664,9 +702,10 @@
vector<UpdateStatus> conditionsToUpdate(3, UPDATE_UNKNOWN);
vector<bool> cycleTracker(3, false);
// Only update the combination. Simple2 and combination1 must be evaluated.
- EXPECT_TRUE(determineConditionUpdateStatus(newConfig, 0, oldConditionTrackerMap,
- oldConditionTrackers, newConditionTrackerMap,
- replacedMatchers, conditionsToUpdate, cycleTracker));
+ EXPECT_EQ(determineConditionUpdateStatus(newConfig, 0, oldConditionTrackerMap,
+ oldConditionTrackers, newConditionTrackerMap,
+ replacedMatchers, conditionsToUpdate, cycleTracker),
+ nullopt);
EXPECT_EQ(conditionsToUpdate[0], UPDATE_REPLACE);
EXPECT_EQ(conditionsToUpdate[1], UPDATE_REPLACE);
}
@@ -799,12 +838,13 @@
unordered_map<int64_t, int> newConditionTrackerMap;
vector<sp<ConditionTracker>> newConditionTrackers;
unordered_map<int, vector<int>> trackerToConditionMap;
- std::vector<ConditionState> conditionCache;
- std::set<int64_t> replacedConditions;
- EXPECT_TRUE(updateConditions(key, newConfig, newAtomMatchingTrackerMap, replacedMatchers,
- oldConditionTrackerMap, oldConditionTrackers,
- newConditionTrackerMap, newConditionTrackers,
- trackerToConditionMap, conditionCache, replacedConditions));
+ vector<ConditionState> conditionCache;
+ set<int64_t> replacedConditions;
+ EXPECT_EQ(updateConditions(key, newConfig, newAtomMatchingTrackerMap, replacedMatchers,
+ oldConditionTrackerMap, oldConditionTrackers, newConditionTrackerMap,
+ newConditionTrackers, trackerToConditionMap, conditionCache,
+ replacedConditions),
+ nullopt);
unordered_map<int64_t, int> expectedConditionTrackerMap = {
{simple1Id, simple1Index}, {simple2Id, simple2Index},
@@ -935,8 +975,9 @@
unordered_map<int64_t, unordered_map<int, int64_t>> allStateGroupMaps;
map<int64_t, uint64_t> newStateProtoHashes;
set<int64_t> replacedStates;
- EXPECT_TRUE(updateStates(newConfig, oldStateHashes, stateAtomIdMap, allStateGroupMaps,
- newStateProtoHashes, replacedStates));
+ EXPECT_EQ(updateStates(newConfig, oldStateHashes, stateAtomIdMap, allStateGroupMaps,
+ newStateProtoHashes, replacedStates),
+ nullopt);
EXPECT_THAT(replacedStates, ContainerEq(set({state1Id, state3Id})));
unordered_map<int64_t, int> expectedStateAtomIdMap = {
@@ -974,10 +1015,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate(1, UPDATE_UNKNOWN);
- EXPECT_TRUE(determineAllMetricUpdateStatuses(config, oldMetricProducerMap, oldMetricProducers,
- metricToActivationMap,
- /*replacedMatchers*/ {}, /*replacedConditions=*/{},
- /*replacedStates=*/{}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(config, oldMetricProducerMap, oldMetricProducers,
+ metricToActivationMap,
+ /*replacedMatchers*/ {}, /*replacedConditions=*/{},
+ /*replacedStates=*/{}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_PRESERVE);
}
@@ -1010,10 +1052,11 @@
unordered_map<int64_t, int> metricToActivationMap = {{12345, 0}};
vector<UpdateStatus> metricsToUpdate(1, UPDATE_UNKNOWN);
- EXPECT_TRUE(determineAllMetricUpdateStatuses(config, oldMetricProducerMap, oldMetricProducers,
- metricToActivationMap,
- /*replacedMatchers*/ {}, /*replacedConditions=*/{},
- /*replacedStates=*/{}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(config, oldMetricProducerMap, oldMetricProducers,
+ metricToActivationMap,
+ /*replacedMatchers*/ {}, /*replacedConditions=*/{},
+ /*replacedStates=*/{}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_REPLACE);
}
@@ -1039,10 +1082,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate(1, UPDATE_UNKNOWN);
- EXPECT_TRUE(determineAllMetricUpdateStatuses(
- config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
- /*replacedMatchers*/ {whatMatcher.id()}, /*replacedConditions=*/{},
- /*replacedStates=*/{}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(
+ config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
+ /*replacedMatchers*/ {whatMatcher.id()}, /*replacedConditions=*/{},
+ /*replacedStates=*/{}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_REPLACE);
}
@@ -1068,10 +1112,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate(1, UPDATE_UNKNOWN);
- EXPECT_TRUE(determineAllMetricUpdateStatuses(
- config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
- /*replacedMatchers*/ {}, /*replacedConditions=*/{predicate.id()},
- /*replacedStates=*/{}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(
+ config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
+ /*replacedMatchers*/ {}, /*replacedConditions=*/{predicate.id()},
+ /*replacedStates=*/{}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_REPLACE);
}
@@ -1104,10 +1149,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate(1, UPDATE_UNKNOWN);
- EXPECT_TRUE(determineAllMetricUpdateStatuses(
- config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
- /*replacedMatchers*/ {}, /*replacedConditions=*/{linkPredicate.id()},
- /*replacedStates=*/{}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(
+ config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
+ /*replacedMatchers*/ {}, /*replacedConditions=*/{linkPredicate.id()},
+ /*replacedStates=*/{}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_REPLACE);
}
@@ -1139,10 +1185,11 @@
unordered_map<int64_t, int> metricToActivationMap = {{12345, 0}};
vector<UpdateStatus> metricsToUpdate(1, UPDATE_UNKNOWN);
- EXPECT_TRUE(determineAllMetricUpdateStatuses(
- config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
- /*replacedMatchers*/ {startMatcher.id()}, /*replacedConditions=*/{},
- /*replacedStates=*/{}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(
+ config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
+ /*replacedMatchers*/ {startMatcher.id()}, /*replacedConditions=*/{},
+ /*replacedStates=*/{}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_REPLACE);
}
@@ -1172,10 +1219,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate(1, UPDATE_UNKNOWN);
- EXPECT_TRUE(determineAllMetricUpdateStatuses(config, oldMetricProducerMap, oldMetricProducers,
- metricToActivationMap,
- /*replacedMatchers*/ {}, /*replacedConditions=*/{},
- /*replacedStates=*/{}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(config, oldMetricProducerMap, oldMetricProducers,
+ metricToActivationMap,
+ /*replacedMatchers*/ {}, /*replacedConditions=*/{},
+ /*replacedStates=*/{}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_PRESERVE);
}
@@ -1205,10 +1253,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate(1, UPDATE_UNKNOWN);
- EXPECT_TRUE(determineAllMetricUpdateStatuses(config, oldMetricProducerMap, oldMetricProducers,
- metricToActivationMap,
- /*replacedMatchers*/ {}, /*replacedConditions=*/{},
- /*replacedStates=*/{}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(config, oldMetricProducerMap, oldMetricProducers,
+ metricToActivationMap,
+ /*replacedMatchers*/ {}, /*replacedConditions=*/{},
+ /*replacedStates=*/{}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_REPLACE);
}
@@ -1235,10 +1284,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate(1, UPDATE_UNKNOWN);
- EXPECT_TRUE(determineAllMetricUpdateStatuses(
- config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
- /*replacedMatchers*/ {whatMatcher.id()}, /*replacedConditions=*/{},
- /*replacedStates=*/{}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(
+ config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
+ /*replacedMatchers*/ {whatMatcher.id()}, /*replacedConditions=*/{},
+ /*replacedStates=*/{}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_REPLACE);
}
@@ -1265,10 +1315,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate(1, UPDATE_UNKNOWN);
- EXPECT_TRUE(determineAllMetricUpdateStatuses(
- config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
- /*replacedMatchers*/ {}, /*replacedConditions=*/{predicate.id()},
- /*replacedStates=*/{}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(
+ config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
+ /*replacedMatchers*/ {}, /*replacedConditions=*/{predicate.id()},
+ /*replacedStates=*/{}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_REPLACE);
}
@@ -1295,10 +1346,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate(1, UPDATE_UNKNOWN);
- EXPECT_TRUE(determineAllMetricUpdateStatuses(
- config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
- /*replacedMatchers*/ {}, /*replacedConditions=*/{},
- /*replacedStates=*/{sliceState.id()}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(
+ config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
+ /*replacedMatchers*/ {}, /*replacedConditions=*/{},
+ /*replacedStates=*/{sliceState.id()}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_REPLACE);
}
@@ -1321,10 +1373,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate(1, UPDATE_UNKNOWN);
- EXPECT_TRUE(determineAllMetricUpdateStatuses(config, oldMetricProducerMap, oldMetricProducers,
- metricToActivationMap,
- /*replacedMatchers*/ {}, /*replacedConditions=*/{},
- /*replacedStates=*/{}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(config, oldMetricProducerMap, oldMetricProducers,
+ metricToActivationMap,
+ /*replacedMatchers*/ {}, /*replacedConditions=*/{},
+ /*replacedStates=*/{}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_PRESERVE);
}
@@ -1343,10 +1396,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate(1, UPDATE_UNKNOWN);
- EXPECT_TRUE(determineAllMetricUpdateStatuses(config, oldMetricProducerMap, oldMetricProducers,
- metricToActivationMap,
- /*replacedMatchers*/ {}, /*replacedConditions=*/{},
- /*replacedStates=*/{}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(config, oldMetricProducerMap, oldMetricProducers,
+ metricToActivationMap,
+ /*replacedMatchers*/ {}, /*replacedConditions=*/{},
+ /*replacedStates=*/{}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_REPLACE);
}
@@ -1362,10 +1416,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate(1, UPDATE_UNKNOWN);
- EXPECT_TRUE(determineAllMetricUpdateStatuses(
- config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
- /*replacedMatchers*/ {whatMatcher.id()}, /*replacedConditions=*/{},
- /*replacedStates=*/{}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(
+ config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
+ /*replacedMatchers*/ {whatMatcher.id()}, /*replacedConditions=*/{},
+ /*replacedStates=*/{}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_REPLACE);
}
@@ -1388,10 +1443,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate(1, UPDATE_UNKNOWN);
- EXPECT_TRUE(determineAllMetricUpdateStatuses(
- config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
- /*replacedMatchers*/ {}, /*replacedConditions=*/{predicate.id()},
- /*replacedStates=*/{}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(
+ config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
+ /*replacedMatchers*/ {}, /*replacedConditions=*/{predicate.id()},
+ /*replacedStates=*/{}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_REPLACE);
}
@@ -1409,10 +1465,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate(1, UPDATE_UNKNOWN);
- EXPECT_TRUE(determineAllMetricUpdateStatuses(
- config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
- /*replacedMatchers*/ {triggerEvent.id()}, /*replacedConditions=*/{},
- /*replacedStates=*/{}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(
+ config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
+ /*replacedMatchers*/ {triggerEvent.id()}, /*replacedConditions=*/{},
+ /*replacedStates=*/{}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_REPLACE);
}
@@ -1437,10 +1494,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate(1, UPDATE_UNKNOWN);
- EXPECT_TRUE(determineAllMetricUpdateStatuses(config, oldMetricProducerMap, oldMetricProducers,
- metricToActivationMap,
- /*replacedMatchers*/ {}, /*replacedConditions=*/{},
- /*replacedStates=*/{}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(config, oldMetricProducerMap, oldMetricProducers,
+ metricToActivationMap,
+ /*replacedMatchers*/ {}, /*replacedConditions=*/{},
+ /*replacedStates=*/{}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_PRESERVE);
}
@@ -1461,10 +1519,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate(1, UPDATE_UNKNOWN);
- EXPECT_TRUE(determineAllMetricUpdateStatuses(config, oldMetricProducerMap, oldMetricProducers,
- metricToActivationMap, /*replacedMatchers*/ {},
- /*replacedConditions=*/{}, /*replacedStates=*/{},
- metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(config, oldMetricProducerMap, oldMetricProducers,
+ metricToActivationMap, /*replacedMatchers*/ {},
+ /*replacedConditions=*/{}, /*replacedStates=*/{},
+ metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_REPLACE);
}
@@ -1483,10 +1542,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate(1, UPDATE_UNKNOWN);
- EXPECT_TRUE(determineAllMetricUpdateStatuses(
- config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
- /*replacedMatchers*/ {}, /*replacedConditions=*/{what.id()},
- /*replacedStates=*/{}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(
+ config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
+ /*replacedMatchers*/ {}, /*replacedConditions=*/{what.id()},
+ /*replacedStates=*/{}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_REPLACE);
}
@@ -1507,10 +1567,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate(1, UPDATE_UNKNOWN);
- EXPECT_TRUE(determineAllMetricUpdateStatuses(
- config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
- /*replacedMatchers*/ {}, /*replacedConditions=*/{condition.id()},
- /*replacedStates=*/{}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(
+ config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
+ /*replacedMatchers*/ {}, /*replacedConditions=*/{condition.id()},
+ /*replacedStates=*/{}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_REPLACE);
}
@@ -1533,10 +1594,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate(1, UPDATE_UNKNOWN);
- EXPECT_TRUE(determineAllMetricUpdateStatuses(
- config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
- /*replacedMatchers*/ {}, /*replacedConditions=*/{},
- /*replacedStates=*/{sliceState.id()}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(
+ config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
+ /*replacedMatchers*/ {}, /*replacedConditions=*/{},
+ /*replacedStates=*/{sliceState.id()}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_REPLACE);
}
@@ -1560,10 +1622,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate(1, UPDATE_UNKNOWN);
- EXPECT_TRUE(determineAllMetricUpdateStatuses(config, oldMetricProducerMap, oldMetricProducers,
- metricToActivationMap,
- /*replacedMatchers*/ {}, /*replacedConditions=*/{},
- /*replacedStates=*/{}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(config, oldMetricProducerMap, oldMetricProducers,
+ metricToActivationMap,
+ /*replacedMatchers*/ {}, /*replacedConditions=*/{},
+ /*replacedStates=*/{}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_PRESERVE);
}
@@ -1580,10 +1643,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate(1, UPDATE_UNKNOWN);
- EXPECT_TRUE(determineAllMetricUpdateStatuses(config, oldMetricProducerMap, oldMetricProducers,
- metricToActivationMap,
- /*replacedMatchers*/ {}, /*replacedConditions=*/{},
- /*replacedStates=*/{}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(config, oldMetricProducerMap, oldMetricProducers,
+ metricToActivationMap,
+ /*replacedMatchers*/ {}, /*replacedConditions=*/{},
+ /*replacedStates=*/{}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_REPLACE);
}
@@ -1597,10 +1661,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate(1, UPDATE_UNKNOWN);
- EXPECT_TRUE(determineAllMetricUpdateStatuses(
- config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
- /*replacedMatchers*/ {whatMatcher.id()}, /*replacedConditions=*/{},
- /*replacedStates=*/{}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(
+ config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
+ /*replacedMatchers*/ {whatMatcher.id()}, /*replacedConditions=*/{},
+ /*replacedStates=*/{}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_REPLACE);
}
@@ -1621,10 +1686,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate(1, UPDATE_UNKNOWN);
- EXPECT_TRUE(determineAllMetricUpdateStatuses(
- config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
- /*replacedMatchers*/ {}, /*replacedConditions=*/{predicate.id()},
- /*replacedStates=*/{}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(
+ config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
+ /*replacedMatchers*/ {}, /*replacedConditions=*/{predicate.id()},
+ /*replacedStates=*/{}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_REPLACE);
}
@@ -1642,10 +1708,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate(1, UPDATE_UNKNOWN);
- EXPECT_TRUE(determineAllMetricUpdateStatuses(
- config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
- /*replacedMatchers*/ {}, /*replacedConditions=*/{},
- /*replacedStates=*/{sliceState.id()}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(
+ config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
+ /*replacedMatchers*/ {}, /*replacedConditions=*/{},
+ /*replacedStates=*/{sliceState.id()}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_REPLACE);
}
@@ -1667,10 +1734,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate{UPDATE_UNKNOWN};
- EXPECT_TRUE(determineAllMetricUpdateStatuses(config, oldMetricProducerMap, oldMetricProducers,
- metricToActivationMap,
- /*replacedMatchers*/ {}, /*replacedConditions=*/{},
- /*replacedStates=*/{}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(config, oldMetricProducerMap, oldMetricProducers,
+ metricToActivationMap,
+ /*replacedMatchers*/ {}, /*replacedConditions=*/{},
+ /*replacedStates=*/{}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_PRESERVE);
}
@@ -1688,10 +1756,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate{UPDATE_UNKNOWN};
- EXPECT_TRUE(determineAllMetricUpdateStatuses(config, oldMetricProducerMap, oldMetricProducers,
- metricToActivationMap,
- /*replacedMatchers*/ {}, /*replacedConditions=*/{},
- /*replacedStates=*/{}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(config, oldMetricProducerMap, oldMetricProducers,
+ metricToActivationMap,
+ /*replacedMatchers*/ {}, /*replacedConditions=*/{},
+ /*replacedStates=*/{}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_REPLACE);
}
@@ -1705,10 +1774,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate{UPDATE_UNKNOWN};
- EXPECT_TRUE(determineAllMetricUpdateStatuses(
- config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
- /*replacedMatchers*/ {whatMatcher.id()}, /*replacedConditions=*/{},
- /*replacedStates=*/{}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(
+ config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
+ /*replacedMatchers*/ {whatMatcher.id()}, /*replacedConditions=*/{},
+ /*replacedStates=*/{}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_REPLACE);
}
@@ -1730,10 +1800,11 @@
unordered_map<int64_t, int> metricToActivationMap;
vector<UpdateStatus> metricsToUpdate{UPDATE_UNKNOWN};
- EXPECT_TRUE(determineAllMetricUpdateStatuses(
- config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
- /*replacedMatchers*/ {}, /*replacedConditions=*/{predicate.id()},
- /*replacedStates=*/{}, metricsToUpdate));
+ EXPECT_EQ(determineAllMetricUpdateStatuses(
+ config, oldMetricProducerMap, oldMetricProducers, metricToActivationMap,
+ /*replacedMatchers*/ {}, /*replacedConditions=*/{predicate.id()},
+ /*replacedStates=*/{}, metricsToUpdate),
+ nullopt);
EXPECT_EQ(metricsToUpdate[0], UPDATE_REPLACE);
}
@@ -1886,15 +1957,17 @@
unordered_map<int, vector<int>> deactivationAtomTrackerToMetricMap;
vector<int> metricsWithActivation;
set<int64_t> replacedMetrics;
- EXPECT_TRUE(updateMetrics(
- key, newConfig, /*timeBaseNs=*/123, /*currentTimeNs=*/12345, new StatsPullerManager(),
- oldAtomMatchingTrackerMap, newAtomMatchingTrackerMap, replacedMatchers,
- newAtomMatchingTrackers, newConditionTrackerMap, replacedConditions,
- newConditionTrackers, conditionCache, /*stateAtomIdMap=*/{}, /*allStateGroupMaps=*/{},
- /*replacedStates=*/{}, oldMetricProducerMap, oldMetricProducers, newMetricProducerMap,
- newMetricProducers, conditionToMetricMap, trackerToMetricMap, noReportMetricIds,
- activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
- metricsWithActivation, replacedMetrics));
+ EXPECT_EQ(updateMetrics(key, newConfig, /*timeBaseNs=*/123, /*currentTimeNs=*/12345,
+ new StatsPullerManager(), oldAtomMatchingTrackerMap,
+ newAtomMatchingTrackerMap, replacedMatchers, newAtomMatchingTrackers,
+ newConditionTrackerMap, replacedConditions, newConditionTrackers,
+ conditionCache, /*stateAtomIdMap=*/{}, /*allStateGroupMaps=*/{},
+ /*replacedStates=*/{}, oldMetricProducerMap, oldMetricProducers,
+ newMetricProducerMap, newMetricProducers, conditionToMetricMap,
+ trackerToMetricMap, noReportMetricIds, activationAtomTrackerToMetricMap,
+ deactivationAtomTrackerToMetricMap, metricsWithActivation,
+ replacedMetrics),
+ nullopt);
unordered_map<int64_t, int> expectedMetricProducerMap = {
{event1Id, event1Index}, {event2Id, event2Index}, {event3Id, event3Index},
@@ -2104,7 +2177,7 @@
unordered_map<int64_t, int> stateAtomIdMap;
unordered_map<int64_t, unordered_map<int, int64_t>> allStateGroupMaps;
map<int64_t, uint64_t> stateProtoHashes;
- EXPECT_TRUE(initStates(newConfig, stateAtomIdMap, allStateGroupMaps, stateProtoHashes));
+ EXPECT_EQ(initStates(newConfig, stateAtomIdMap, allStateGroupMaps, stateProtoHashes), nullopt);
EXPECT_EQ(stateAtomIdMap[state2Id], util::BATTERY_SAVER_MODE_STATE_CHANGED);
// Output data structures to validate.
@@ -2117,15 +2190,17 @@
unordered_map<int, vector<int>> deactivationAtomTrackerToMetricMap;
vector<int> metricsWithActivation;
set<int64_t> replacedMetrics;
- EXPECT_TRUE(updateMetrics(
- key, newConfig, /*timeBaseNs=*/123, /*currentTimeNs=*/12345, new StatsPullerManager(),
- oldAtomMatchingTrackerMap, newAtomMatchingTrackerMap, replacedMatchers,
- newAtomMatchingTrackers, newConditionTrackerMap, /*replacedConditions=*/{},
- newConditionTrackers, conditionCache, stateAtomIdMap, allStateGroupMaps, replacedStates,
- oldMetricProducerMap, oldMetricProducers, newMetricProducerMap, newMetricProducers,
- conditionToMetricMap, trackerToMetricMap, noReportMetricIds,
- activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
- metricsWithActivation, replacedMetrics));
+ EXPECT_EQ(updateMetrics(key, newConfig, /*timeBaseNs=*/123, /*currentTimeNs=*/12345,
+ new StatsPullerManager(), oldAtomMatchingTrackerMap,
+ newAtomMatchingTrackerMap, replacedMatchers, newAtomMatchingTrackers,
+ newConditionTrackerMap, /*replacedConditions=*/{}, newConditionTrackers,
+ conditionCache, stateAtomIdMap, allStateGroupMaps, replacedStates,
+ oldMetricProducerMap, oldMetricProducers, newMetricProducerMap,
+ newMetricProducers, conditionToMetricMap, trackerToMetricMap,
+ noReportMetricIds, activationAtomTrackerToMetricMap,
+ deactivationAtomTrackerToMetricMap, metricsWithActivation,
+ replacedMetrics),
+ nullopt);
unordered_map<int64_t, int> expectedMetricProducerMap = {
{count1Id, count1Index}, {count2Id, count2Index}, {count3Id, count3Index},
@@ -2328,15 +2403,17 @@
unordered_map<int, vector<int>> deactivationAtomTrackerToMetricMap;
vector<int> metricsWithActivation;
set<int64_t> replacedMetrics;
- EXPECT_TRUE(updateMetrics(
- key, newConfig, /*timeBaseNs=*/123, /*currentTimeNs=*/12345, new StatsPullerManager(),
- oldAtomMatchingTrackerMap, newAtomMatchingTrackerMap, replacedMatchers,
- newAtomMatchingTrackers, newConditionTrackerMap, /*replacedConditions=*/{},
- newConditionTrackers, conditionCache, /*stateAtomIdMap=*/{}, /*allStateGroupMaps=*/{},
- /*replacedStates=*/{}, oldMetricProducerMap, oldMetricProducers, newMetricProducerMap,
- newMetricProducers, conditionToMetricMap, trackerToMetricMap, noReportMetricIds,
- activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
- metricsWithActivation, replacedMetrics));
+ EXPECT_EQ(updateMetrics(key, newConfig, /*timeBaseNs=*/123, /*currentTimeNs=*/12345,
+ new StatsPullerManager(), oldAtomMatchingTrackerMap,
+ newAtomMatchingTrackerMap, replacedMatchers, newAtomMatchingTrackers,
+ newConditionTrackerMap, /*replacedConditions=*/{}, newConditionTrackers,
+ conditionCache, /*stateAtomIdMap=*/{}, /*allStateGroupMaps=*/{},
+ /*replacedStates=*/{}, oldMetricProducerMap, oldMetricProducers,
+ newMetricProducerMap, newMetricProducers, conditionToMetricMap,
+ trackerToMetricMap, noReportMetricIds, activationAtomTrackerToMetricMap,
+ deactivationAtomTrackerToMetricMap, metricsWithActivation,
+ replacedMetrics),
+ nullopt);
unordered_map<int64_t, int> expectedMetricProducerMap = {
{gauge1Id, gauge1Index}, {gauge2Id, gauge2Index}, {gauge3Id, gauge3Index},
@@ -2601,16 +2678,18 @@
vector<Predicate> conditionProtos(5);
reverse_copy(config.predicate().begin(), config.predicate().end(), conditionProtos.begin());
for (int i = 0; i < newConditionTrackers.size(); i++) {
- EXPECT_TRUE(newConditionTrackers[i]->onConfigUpdated(
- conditionProtos, i, newConditionTrackers, newAtomMatchingTrackerMap,
- newConditionTrackerMap));
+ EXPECT_EQ(newConditionTrackers[i]->onConfigUpdated(conditionProtos, i, newConditionTrackers,
+ newAtomMatchingTrackerMap,
+ newConditionTrackerMap),
+ nullopt);
}
vector<bool> cycleTracker(5, false);
fill(conditionCache.begin(), conditionCache.end(), ConditionState::kNotEvaluated);
for (int i = 0; i < newConditionTrackers.size(); i++) {
- EXPECT_TRUE(newConditionTrackers[i]->init(conditionProtos, newConditionTrackers,
- newConditionTrackerMap, cycleTracker,
- conditionCache));
+ EXPECT_EQ(
+ newConditionTrackers[i]->init(conditionProtos, newConditionTrackers,
+ newConditionTrackerMap, cycleTracker, conditionCache),
+ nullopt);
}
// Predicate5 should be true since 2 uids have wakelocks
EXPECT_EQ(conditionCache, vector({kTrue, kFalse, kUnknown, kUnknown, kUnknown}));
@@ -2635,7 +2714,7 @@
unordered_map<int64_t, int> stateAtomIdMap;
unordered_map<int64_t, unordered_map<int, int64_t>> allStateGroupMaps;
map<int64_t, uint64_t> stateProtoHashes;
- EXPECT_TRUE(initStates(newConfig, stateAtomIdMap, allStateGroupMaps, stateProtoHashes));
+ EXPECT_EQ(initStates(newConfig, stateAtomIdMap, allStateGroupMaps, stateProtoHashes), nullopt);
// Output data structures to validate.
unordered_map<int64_t, int> newMetricProducerMap;
@@ -2647,15 +2726,17 @@
unordered_map<int, vector<int>> deactivationAtomTrackerToMetricMap;
vector<int> metricsWithActivation;
set<int64_t> replacedMetrics;
- EXPECT_TRUE(updateMetrics(
- key, newConfig, /*timeBaseNs=*/123, /*currentTimeNs=*/12345, new StatsPullerManager(),
- oldAtomMatchingTrackerMap, newAtomMatchingTrackerMap, /*replacedMatchers=*/{},
- newAtomMatchingTrackers, newConditionTrackerMap, replacedConditions,
- newConditionTrackers, conditionCache, stateAtomIdMap, allStateGroupMaps, replacedStates,
- oldMetricProducerMap, oldMetricProducers, newMetricProducerMap, newMetricProducers,
- conditionToMetricMap, trackerToMetricMap, noReportMetricIds,
- activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
- metricsWithActivation, replacedMetrics));
+ EXPECT_EQ(updateMetrics(key, newConfig, /*timeBaseNs=*/123, /*currentTimeNs=*/12345,
+ new StatsPullerManager(), oldAtomMatchingTrackerMap,
+ newAtomMatchingTrackerMap, /*replacedMatchers=*/{},
+ newAtomMatchingTrackers, newConditionTrackerMap, replacedConditions,
+ newConditionTrackers, conditionCache, stateAtomIdMap, allStateGroupMaps,
+ replacedStates, oldMetricProducerMap, oldMetricProducers,
+ newMetricProducerMap, newMetricProducers, conditionToMetricMap,
+ trackerToMetricMap, noReportMetricIds, activationAtomTrackerToMetricMap,
+ deactivationAtomTrackerToMetricMap, metricsWithActivation,
+ replacedMetrics),
+ nullopt);
unordered_map<int64_t, int> expectedMetricProducerMap = {
{duration1Id, duration1Index}, {duration2Id, duration2Index},
@@ -2901,7 +2982,7 @@
unordered_map<int64_t, int> stateAtomIdMap;
unordered_map<int64_t, unordered_map<int, int64_t>> allStateGroupMaps;
map<int64_t, uint64_t> stateProtoHashes;
- EXPECT_TRUE(initStates(newConfig, stateAtomIdMap, allStateGroupMaps, stateProtoHashes));
+ EXPECT_EQ(initStates(newConfig, stateAtomIdMap, allStateGroupMaps, stateProtoHashes), nullopt);
// Output data structures to validate.
unordered_map<int64_t, int> newMetricProducerMap;
@@ -2913,15 +2994,17 @@
unordered_map<int, vector<int>> deactivationAtomTrackerToMetricMap;
vector<int> metricsWithActivation;
set<int64_t> replacedMetrics;
- EXPECT_TRUE(updateMetrics(
- key, newConfig, /*timeBaseNs=*/123, /*currentTimeNs=*/12345, new StatsPullerManager(),
- oldAtomMatchingTrackerMap, newAtomMatchingTrackerMap, /*replacedMatchers=*/{},
- newAtomMatchingTrackers, newConditionTrackerMap, replacedConditions,
- newConditionTrackers, conditionCache, stateAtomIdMap, allStateGroupMaps, replacedStates,
- oldMetricProducerMap, oldMetricProducers, newMetricProducerMap, newMetricProducers,
- conditionToMetricMap, trackerToMetricMap, noReportMetricIds,
- activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
- metricsWithActivation, replacedMetrics));
+ EXPECT_EQ(updateMetrics(key, newConfig, /*timeBaseNs=*/123, /*currentTimeNs=*/12345,
+ new StatsPullerManager(), oldAtomMatchingTrackerMap,
+ newAtomMatchingTrackerMap, /*replacedMatchers=*/{},
+ newAtomMatchingTrackers, newConditionTrackerMap, replacedConditions,
+ newConditionTrackers, conditionCache, stateAtomIdMap, allStateGroupMaps,
+ replacedStates, oldMetricProducerMap, oldMetricProducers,
+ newMetricProducerMap, newMetricProducers, conditionToMetricMap,
+ trackerToMetricMap, noReportMetricIds, activationAtomTrackerToMetricMap,
+ deactivationAtomTrackerToMetricMap, metricsWithActivation,
+ replacedMetrics),
+ nullopt);
unordered_map<int64_t, int> expectedMetricProducerMap = {
{value1Id, value1Index}, {value2Id, value2Index}, {value3Id, value3Index},
@@ -3128,15 +3211,17 @@
unordered_map<int, vector<int>> deactivationAtomTrackerToMetricMap;
vector<int> metricsWithActivation;
set<int64_t> replacedMetrics;
- EXPECT_TRUE(updateMetrics(
- key, newConfig, /*timeBaseNs=*/123, /*currentTimeNs=*/12345, new StatsPullerManager(),
- oldAtomMatchingTrackerMap, newAtomMatchingTrackerMap, /*replacedMatchers=*/{},
- newAtomMatchingTrackers, newConditionTrackerMap, replacedConditions,
- newConditionTrackers, conditionCache, /*stateAtomIdMap=*/{}, /*allStateGroupMaps=*/{},
- /*replacedStates=*/{}, oldMetricProducerMap, oldMetricProducers, newMetricProducerMap,
- newMetricProducers, conditionToMetricMap, trackerToMetricMap, noReportMetricIds,
- activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
- metricsWithActivation, replacedMetrics));
+ EXPECT_EQ(updateMetrics(
+ key, newConfig, /*timeBaseNs=*/123, /*currentTimeNs=*/12345,
+ new StatsPullerManager(), oldAtomMatchingTrackerMap,
+ newAtomMatchingTrackerMap, /*replacedMatchers=*/{}, newAtomMatchingTrackers,
+ newConditionTrackerMap, replacedConditions, newConditionTrackers,
+ conditionCache, /*stateAtomIdMap=*/{}, /*allStateGroupMaps=*/{},
+ /*replacedStates=*/{}, oldMetricProducerMap, oldMetricProducers,
+ newMetricProducerMap, newMetricProducers, conditionToMetricMap,
+ trackerToMetricMap, noReportMetricIds, activationAtomTrackerToMetricMap,
+ deactivationAtomTrackerToMetricMap, metricsWithActivation, replacedMetrics),
+ nullopt);
unordered_map<int64_t, int> expectedMetricProducerMap = {
{kll1Id, kll1Index}, {kll2Id, kll2Index}, {kll3Id, kll3Index},
@@ -3299,15 +3384,17 @@
unordered_map<int, vector<int>> deactivationAtomTrackerToMetricMap;
vector<int> metricsWithActivation;
set<int64_t> replacedMetrics;
- EXPECT_TRUE(updateMetrics(
- key, config, /*timeBaseNs=*/123, /*currentTimeNs=*/12345, new StatsPullerManager(),
- oldAtomMatchingTrackerMap, newAtomMatchingTrackerMap, replacedMatchers,
- newAtomMatchingTrackers, newConditionTrackerMap, replacedConditions,
- newConditionTrackers, conditionCache, /*stateAtomIdMap=*/{}, /*allStateGroupMaps=*/{},
- /*replacedStates=*/{}, oldMetricProducerMap, oldMetricProducers, newMetricProducerMap,
- newMetricProducers, conditionToMetricMap, trackerToMetricMap, noReportMetricIds,
- activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
- metricsWithActivation, replacedMetrics));
+ EXPECT_EQ(updateMetrics(key, config, /*timeBaseNs=*/123, /*currentTimeNs=*/12345,
+ new StatsPullerManager(), oldAtomMatchingTrackerMap,
+ newAtomMatchingTrackerMap, replacedMatchers, newAtomMatchingTrackers,
+ newConditionTrackerMap, replacedConditions, newConditionTrackers,
+ conditionCache, /*stateAtomIdMap=*/{}, /*allStateGroupMaps=*/{},
+ /*replacedStates=*/{}, oldMetricProducerMap, oldMetricProducers,
+ newMetricProducerMap, newMetricProducers, conditionToMetricMap,
+ trackerToMetricMap, noReportMetricIds, activationAtomTrackerToMetricMap,
+ deactivationAtomTrackerToMetricMap, metricsWithActivation,
+ replacedMetrics),
+ nullopt);
// Verify event activation/deactivation maps.
ASSERT_EQ(activationAtomTrackerToMetricMap.size(), 3);
@@ -3459,15 +3546,17 @@
unordered_map<int, vector<int>> deactivationAtomTrackerToMetricMap;
vector<int> metricsWithActivation;
set<int64_t> replacedMetrics;
- EXPECT_TRUE(updateMetrics(
- key, newConfig, /*timeBaseNs=*/123, /*currentTimeNs=*/12345, new StatsPullerManager(),
- oldAtomMatchingTrackerMap, newAtomMatchingTrackerMap, replacedMatchers,
- newAtomMatchingTrackers, newConditionTrackerMap, /*replacedConditions=*/{},
- newConditionTrackers, conditionCache, /*stateAtomIdMap*/ {}, /*allStateGroupMaps=*/{},
- /*replacedStates=*/{}, oldMetricProducerMap, oldMetricProducers, newMetricProducerMap,
- newMetricProducers, conditionToMetricMap, trackerToMetricMap, noReportMetricIds,
- activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
- metricsWithActivation, replacedMetrics));
+ EXPECT_EQ(updateMetrics(key, newConfig, /*timeBaseNs=*/123, /*currentTimeNs=*/12345,
+ new StatsPullerManager(), oldAtomMatchingTrackerMap,
+ newAtomMatchingTrackerMap, replacedMatchers, newAtomMatchingTrackers,
+ newConditionTrackerMap, /*replacedConditions=*/{}, newConditionTrackers,
+ conditionCache, /*stateAtomIdMap*/ {}, /*allStateGroupMaps=*/{},
+ /*replacedStates=*/{}, oldMetricProducerMap, oldMetricProducers,
+ newMetricProducerMap, newMetricProducers, conditionToMetricMap,
+ trackerToMetricMap, noReportMetricIds, activationAtomTrackerToMetricMap,
+ deactivationAtomTrackerToMetricMap, metricsWithActivation,
+ replacedMetrics),
+ nullopt);
unordered_map<int64_t, int> expectedMetricProducerMap = {
{countMetricId, countMetricIndex}, {durationMetricId, durationMetricIndex},
@@ -3552,8 +3641,9 @@
EXPECT_TRUE(initConfig(config));
UpdateStatus updateStatus = UPDATE_UNKNOWN;
- EXPECT_TRUE(determineAlertUpdateStatus(alert, oldAlertTrackerMap, oldAnomalyTrackers,
- /*replacedMetrics*/ {}, updateStatus));
+ EXPECT_EQ(determineAlertUpdateStatus(alert, oldAlertTrackerMap, oldAnomalyTrackers,
+ /*replacedMetrics*/ {}, updateStatus),
+ nullopt);
EXPECT_EQ(updateStatus, UPDATE_PRESERVE);
}
@@ -3570,8 +3660,9 @@
EXPECT_TRUE(initConfig(config));
UpdateStatus updateStatus = UPDATE_UNKNOWN;
- EXPECT_TRUE(determineAlertUpdateStatus(alert, oldAlertTrackerMap, oldAnomalyTrackers,
- /*replacedMetrics*/ {metric.id()}, updateStatus));
+ EXPECT_EQ(determineAlertUpdateStatus(alert, oldAlertTrackerMap, oldAnomalyTrackers,
+ /*replacedMetrics*/ {metric.id()}, updateStatus),
+ nullopt);
EXPECT_EQ(updateStatus, UPDATE_REPLACE);
}
@@ -3589,8 +3680,9 @@
alert.set_num_buckets(2);
UpdateStatus updateStatus = UPDATE_UNKNOWN;
- EXPECT_TRUE(determineAlertUpdateStatus(alert, oldAlertTrackerMap, oldAnomalyTrackers,
- /*replacedMetrics*/ {}, updateStatus));
+ EXPECT_EQ(determineAlertUpdateStatus(alert, oldAlertTrackerMap, oldAnomalyTrackers,
+ /*replacedMetrics*/ {}, updateStatus),
+ nullopt);
EXPECT_EQ(updateStatus, UPDATE_REPLACE);
}
@@ -3685,24 +3777,26 @@
vector<int> metricsWithActivation;
set<int64_t> replacedMetrics;
int64_t currentTimeNs = 12345;
- EXPECT_TRUE(updateMetrics(
- key, config, /*timeBaseNs=*/123, currentTimeNs, new StatsPullerManager(),
- oldAtomMatchingTrackerMap, oldAtomMatchingTrackerMap, /*replacedMatchers*/ {},
- oldAtomMatchingTrackers, oldConditionTrackerMap, /*replacedConditions=*/{},
- oldConditionTrackers, {ConditionState::kUnknown}, /*stateAtomIdMap*/ {},
- /*allStateGroupMaps=*/{},
- /*replacedStates=*/{}, oldMetricProducerMap, oldMetricProducers, newMetricProducerMap,
- newMetricProducers, conditionToMetricMap, trackerToMetricMap, noReportMetricIds,
- activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
- metricsWithActivation, replacedMetrics));
+ EXPECT_EQ(updateMetrics(
+ key, config, /*timeBaseNs=*/123, currentTimeNs, new StatsPullerManager(),
+ oldAtomMatchingTrackerMap, oldAtomMatchingTrackerMap, /*replacedMatchers*/ {},
+ oldAtomMatchingTrackers, oldConditionTrackerMap, /*replacedConditions=*/{},
+ oldConditionTrackers, {ConditionState::kUnknown}, /*stateAtomIdMap*/ {},
+ /*allStateGroupMaps=*/{},
+ /*replacedStates=*/{}, oldMetricProducerMap, oldMetricProducers,
+ newMetricProducerMap, newMetricProducers, conditionToMetricMap,
+ trackerToMetricMap, noReportMetricIds, activationAtomTrackerToMetricMap,
+ deactivationAtomTrackerToMetricMap, metricsWithActivation, replacedMetrics),
+ nullopt);
EXPECT_EQ(replacedMetrics, set<int64_t>({countMetricId}));
unordered_map<int64_t, int> newAlertTrackerMap;
vector<sp<AnomalyTracker>> newAnomalyTrackers;
- EXPECT_TRUE(updateAlerts(config, currentTimeNs, newMetricProducerMap, replacedMetrics,
- oldAlertTrackerMap, oldAnomalyTrackers, anomalyAlarmMonitor,
- newMetricProducers, newAlertTrackerMap, newAnomalyTrackers));
+ EXPECT_EQ(updateAlerts(config, currentTimeNs, newMetricProducerMap, replacedMetrics,
+ oldAlertTrackerMap, oldAnomalyTrackers, anomalyAlarmMonitor,
+ newMetricProducers, newAlertTrackerMap, newAnomalyTrackers),
+ nullopt);
unordered_map<int64_t, int> expectedAlertMap = {
{alert1Id, alert1Index},
@@ -3797,8 +3891,9 @@
// Update time is 2 seconds after the base time.
int64_t currentTimeNs = timeBaseNs + 2 * NS_PER_SEC;
vector<sp<AlarmTracker>> newAlarmTrackers;
- EXPECT_TRUE(initAlarms(config, key, periodicAlarmMonitor, timeBaseNs, currentTimeNs,
- newAlarmTrackers));
+ EXPECT_EQ(initAlarms(config, key, periodicAlarmMonitor, timeBaseNs, currentTimeNs,
+ newAlarmTrackers),
+ nullopt);
ASSERT_EQ(newAlarmTrackers.size(), 3);
// Config is updated 2 seconds after statsd start
@@ -3828,8 +3923,9 @@
// Do another update 60 seconds after config creation time, after the offsets of each alarm.
currentTimeNs = timeBaseNs + 60 * NS_PER_SEC;
newAlarmTrackers.clear();
- EXPECT_TRUE(initAlarms(config, key, periodicAlarmMonitor, timeBaseNs, currentTimeNs,
- newAlarmTrackers));
+ EXPECT_EQ(initAlarms(config, key, periodicAlarmMonitor, timeBaseNs, currentTimeNs,
+ newAlarmTrackers),
+ nullopt);
ASSERT_EQ(newAlarmTrackers.size(), 3);
// Config is updated one minute after statsd start.
@@ -3839,6 +3935,213 @@
EXPECT_EQ(newAlarmTrackers[2]->getAlarmTimestampSec(), timeBaseNs / NS_PER_SEC + 10 + 10000);
}
+TEST_F(ConfigUpdateTest, TestMetricHasMultipleActivations) {
+ StatsdConfig config;
+ int64_t metricId = 1;
+ auto metric_activation1 = config.add_metric_activation();
+ metric_activation1->set_metric_id(metricId);
+ metric_activation1->set_activation_type(ACTIVATE_IMMEDIATELY);
+ EXPECT_TRUE(initConfig(config));
+
+ auto metric_activation2 = config.add_metric_activation();
+ metric_activation2->set_metric_id(metricId);
+ metric_activation2->set_activation_type(ACTIVATE_IMMEDIATELY);
+
+ unordered_map<int64_t, unordered_map<int, int64_t>> allStateGroupMaps;
+ unordered_map<int64_t, int> newAtomMatchingTrackerMap;
+ unordered_map<int64_t, int> newConditionTrackerMap;
+ unordered_map<int64_t, int> newMetricProducerMap;
+ unordered_map<int64_t, int> stateAtomIdMap;
+ unordered_map<int, vector<int>> conditionToMetricMap;
+ unordered_map<int, vector<int>> trackerToMetricMap;
+ unordered_map<int, vector<int>> activationAtomTrackerToMetricMap;
+ unordered_map<int, vector<int>> deactivationAtomTrackerToMetricMap;
+ set<int64_t> noReportMetricIds;
+ vector<int> metricsWithActivation;
+ vector<sp<AtomMatchingTracker>> newAtomMatchingTrackers;
+ vector<sp<ConditionTracker>> newConditionTrackers;
+ vector<sp<MetricProducer>> newMetricProducers;
+ vector<ConditionState> conditionCache;
+ set<int64_t> replacedMetrics;
+ set<int64_t> replacedMatchers;
+ set<int64_t> replacedConditions;
+ set<int64_t> replacedStates;
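+ // Two metric_activations reference the same metric id, so updateMetrics should report
+ // INVALID_CONFIG_REASON_METRIC_HAS_MULTIPLE_ACTIVATIONS for that metric.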
+ EXPECT_EQ(updateMetrics(key, config, /*timeBaseNs=*/123, /*currentTimeNs=*/12345,
+ new StatsPullerManager(), oldAtomMatchingTrackerMap,
+ newAtomMatchingTrackerMap, replacedMatchers, newAtomMatchingTrackers,
+ newConditionTrackerMap, replacedConditions, newConditionTrackers,
+ conditionCache, stateAtomIdMap, allStateGroupMaps, replacedStates,
+ oldMetricProducerMap, oldMetricProducers, newMetricProducerMap,
+ newMetricProducers, conditionToMetricMap, trackerToMetricMap,
+ noReportMetricIds, activationAtomTrackerToMetricMap,
+ deactivationAtomTrackerToMetricMap, metricsWithActivation,
+ replacedMetrics),
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_HAS_MULTIPLE_ACTIVATIONS, metricId));
+}
+
+TEST_F(ConfigUpdateTest, TestNoReportMetricNotFound) {
+ StatsdConfig config;
+ int64_t metricId = 1;
+ config.add_no_report_metric(metricId);
+
+ unordered_map<int64_t, unordered_map<int, int64_t>> allStateGroupMaps;
+ unordered_map<int64_t, int> newAtomMatchingTrackerMap;
+ unordered_map<int64_t, int> newConditionTrackerMap;
+ unordered_map<int64_t, int> newMetricProducerMap;
+ unordered_map<int64_t, int> stateAtomIdMap;
+ unordered_map<int, vector<int>> conditionToMetricMap;
+ unordered_map<int, vector<int>> trackerToMetricMap;
+ unordered_map<int, vector<int>> activationAtomTrackerToMetricMap;
+ unordered_map<int, vector<int>> deactivationAtomTrackerToMetricMap;
+ set<int64_t> noReportMetricIds;
+ vector<int> metricsWithActivation;
+ vector<sp<AtomMatchingTracker>> newAtomMatchingTrackers;
+ vector<sp<ConditionTracker>> newConditionTrackers;
+ vector<sp<MetricProducer>> newMetricProducers;
+ vector<ConditionState> conditionCache;
+ set<int64_t> replacedMetrics;
+ set<int64_t> replacedMatchers;
+ set<int64_t> replacedConditions;
+ set<int64_t> replacedStates;
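+ // The config names metric id 1 in no_report_metric but never defines that metric, so the
+ // update should fail with INVALID_CONFIG_REASON_NO_REPORT_METRIC_NOT_FOUND.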
+ EXPECT_EQ(updateMetrics(key, config, /*timeBaseNs=*/123, /*currentTimeNs=*/12345,
+ new StatsPullerManager(), oldAtomMatchingTrackerMap,
+ newAtomMatchingTrackerMap, replacedMatchers, newAtomMatchingTrackers,
+ newConditionTrackerMap, replacedConditions, newConditionTrackers,
+ conditionCache, stateAtomIdMap, allStateGroupMaps, replacedStates,
+ oldMetricProducerMap, oldMetricProducers, newMetricProducerMap,
+ newMetricProducers, conditionToMetricMap, trackerToMetricMap,
+ noReportMetricIds, activationAtomTrackerToMetricMap,
+ deactivationAtomTrackerToMetricMap, metricsWithActivation,
+ replacedMetrics),
+ InvalidConfigReason(INVALID_CONFIG_REASON_NO_REPORT_METRIC_NOT_FOUND, metricId));
+}
+
+TEST_F(ConfigUpdateTest, TestMetricSlicedStateAtomAllowedFromAnyUid) {
+ StatsdConfig config;
+ CountMetric* metric = config.add_count_metric();
+ *metric = createCountMetric(/*name=*/"Count", /*what=*/StringToId("ScreenTurnedOn"),
+ /*condition=*/nullopt, /*states=*/{});
+ *config.add_atom_matcher() = CreateScreenTurnedOnAtomMatcher();
+ *config.add_state() = CreateScreenState();
+ metric->add_slice_by_state(StringToId("ScreenState"));
+ EXPECT_TRUE(initConfig(config));
+
+ config.add_whitelisted_atom_ids(util::SCREEN_STATE_CHANGED);
+
+ unordered_map<int64_t, unordered_map<int, int64_t>> allStateGroupMaps;
+ unordered_map<int64_t, int> newAtomMatchingTrackerMap;
+ unordered_map<int64_t, int> newConditionTrackerMap;
+ unordered_map<int64_t, int> newMetricProducerMap;
+ unordered_map<int64_t, int> stateAtomIdMap;
+ unordered_map<int, vector<int>> conditionToMetricMap;
+ unordered_map<int, vector<int>> trackerToMetricMap;
+ unordered_map<int, vector<int>> activationAtomTrackerToMetricMap;
+ unordered_map<int, vector<int>> deactivationAtomTrackerToMetricMap;
+ set<int64_t> noReportMetricIds;
+ vector<int> metricsWithActivation;
+ vector<sp<AtomMatchingTracker>> newAtomMatchingTrackers;
+ vector<sp<ConditionTracker>> newConditionTrackers;
+ vector<sp<MetricProducer>> newMetricProducers;
+ vector<ConditionState> conditionCache;
+ set<int64_t> replacedMetrics;
+ set<int64_t> replacedMatchers;
+ set<int64_t> replacedConditions;
+ set<int64_t> replacedStates;
+
+ newAtomMatchingTrackerMap[StringToId("ScreenTurnedOn")] = 0;
+ stateAtomIdMap[StringToId("ScreenState")] = util::SCREEN_STATE_CHANGED;
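+ // The sliced state atom (SCREEN_STATE_CHANGED) is now listed in whitelisted_atom_ids, so
+ // slicing the count metric by ScreenState should be rejected.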
+ EXPECT_EQ(
+ updateMetrics(
+ key, config, /*timeBaseNs=*/123, /*currentTimeNs=*/12345,
+ new StatsPullerManager(), oldAtomMatchingTrackerMap, newAtomMatchingTrackerMap,
+ replacedMatchers, newAtomMatchingTrackers, newConditionTrackerMap,
+ replacedConditions, newConditionTrackers, conditionCache, stateAtomIdMap,
+ allStateGroupMaps, replacedStates, oldMetricProducerMap, oldMetricProducers,
+ newMetricProducerMap, newMetricProducers, conditionToMetricMap,
+ trackerToMetricMap, noReportMetricIds, activationAtomTrackerToMetricMap,
+ deactivationAtomTrackerToMetricMap, metricsWithActivation, replacedMetrics),
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_SLICED_STATE_ATOM_ALLOWED_FROM_ANY_UID,
+ StringToId("Count")));
+}
+
+TEST_F(ConfigUpdateTest, TestMatcherDuplicate) {
+ StatsdConfig config;
+ *config.add_atom_matcher() = CreateScreenTurnedOnAtomMatcher();
+ EXPECT_TRUE(initConfig(config));
+
+ *config.add_atom_matcher() = CreateScreenTurnedOnAtomMatcher();
+
+ unordered_map<int, vector<int>> newTagIds;
+ unordered_map<int64_t, int> newAtomMatchingTrackerMap;
+ vector<sp<AtomMatchingTracker>> newAtomMatchingTrackers;
+ set<int64_t> replacedMatchers;
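+ // The updated config now contains the ScreenTurnedOn matcher twice, which should be
+ // reported as a duplicate matcher.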
+ EXPECT_EQ(updateAtomMatchingTrackers(
+ config, uidMap, oldAtomMatchingTrackerMap, oldAtomMatchingTrackers, newTagIds,
+ newAtomMatchingTrackerMap, newAtomMatchingTrackers, replacedMatchers),
+ createInvalidConfigReasonWithMatcher(INVALID_CONFIG_REASON_MATCHER_DUPLICATE,
+ StringToId("ScreenTurnedOn")));
+}
+
+TEST_F(ConfigUpdateTest, TestConditionDuplicate) {
+ StatsdConfig config;
+ *config.add_predicate() = CreateScreenIsOnPredicate();
+ EXPECT_TRUE(initConfig(config));
+
+ *config.add_predicate() = CreateScreenIsOnPredicate();
+
+ unordered_map<int64_t, int> newAtomMatchingTrackerMap;
+ unordered_map<int64_t, int> newConditionTrackerMap;
+ vector<sp<ConditionTracker>> newConditionTrackers;
+ unordered_map<int, vector<int>> trackerToConditionMap;
+ vector<ConditionState> conditionCache;
+ set<int64_t> replacedConditions;
+ set<int64_t> replacedMatchers;
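+ // The updated config now contains the ScreenIsOn predicate twice, which should be
+ // reported as a duplicate condition.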
+ EXPECT_EQ(updateConditions(key, config, newAtomMatchingTrackerMap, replacedMatchers,
+ oldConditionTrackerMap, oldConditionTrackers, newConditionTrackerMap,
+ newConditionTrackers, trackerToConditionMap, conditionCache,
+ replacedConditions),
+ createInvalidConfigReasonWithPredicate(INVALID_CONFIG_REASON_CONDITION_DUPLICATE,
+ StringToId("ScreenIsOn")));
+}
+
+TEST_F(ConfigUpdateTest, TestUpdateConfigNonEventMetricHasRestrictedDelegate) {
+ StatsdConfig config;
+ CountMetric* metric = config.add_count_metric();
+ config.set_restricted_metrics_delegate_package_name("com.android.app.test");
+
+ unordered_map<int64_t, unordered_map<int, int64_t>> allStateGroupMaps;
+ unordered_map<int64_t, int> newAtomMatchingTrackerMap;
+ unordered_map<int64_t, int> newConditionTrackerMap;
+ unordered_map<int64_t, int> newMetricProducerMap;
+ unordered_map<int64_t, int> stateAtomIdMap;
+ unordered_map<int, vector<int>> conditionToMetricMap;
+ unordered_map<int, vector<int>> trackerToMetricMap;
+ unordered_map<int, vector<int>> activationAtomTrackerToMetricMap;
+ unordered_map<int, vector<int>> deactivationAtomTrackerToMetricMap;
+ set<int64_t> noReportMetricIds;
+ vector<int> metricsWithActivation;
+ vector<sp<AtomMatchingTracker>> newAtomMatchingTrackers;
+ vector<sp<ConditionTracker>> newConditionTrackers;
+ vector<sp<MetricProducer>> newMetricProducers;
+ vector<ConditionState> conditionCache;
+ set<int64_t> replacedMetrics;
+ set<int64_t> replacedMatchers;
+ set<int64_t> replacedConditions;
+ set<int64_t> replacedStates;
+
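+ // A restricted metrics delegate is set but the config's only metric is a count metric rather
+ // than an event metric, so the update should be rejected as unsupported.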
+ EXPECT_EQ(updateMetrics(key, config, /*timeBaseNs=*/123, /*currentTimeNs=*/12345,
+ new StatsPullerManager(), oldAtomMatchingTrackerMap,
+ newAtomMatchingTrackerMap, replacedMatchers, newAtomMatchingTrackers,
+ newConditionTrackerMap, replacedConditions, newConditionTrackers,
+ conditionCache, stateAtomIdMap, allStateGroupMaps, replacedStates,
+ oldMetricProducerMap, oldMetricProducers, newMetricProducerMap,
+ newMetricProducers, conditionToMetricMap, trackerToMetricMap,
+ noReportMetricIds, activationAtomTrackerToMetricMap,
+ deactivationAtomTrackerToMetricMap, metricsWithActivation,
+ replacedMetrics),
+ InvalidConfigReason(INVALID_CONFIG_REASON_RESTRICTED_METRIC_NOT_SUPPORTED));
+}
+
} // namespace statsd
} // namespace os
} // namespace android
diff --git a/statsd/tests/metrics/parsing_utils/metrics_manager_util_test.cpp b/statsd/tests/metrics/parsing_utils/metrics_manager_util_test.cpp
index 85806e7..7dd695c 100644
--- a/statsd/tests/metrics/parsing_utils/metrics_manager_util_test.cpp
+++ b/statsd/tests/metrics/parsing_utils/metrics_manager_util_test.cpp
@@ -51,9 +51,43 @@
namespace {
const ConfigKey kConfigKey(0, 12345);
+const long timeBaseSec = 1000;
const long kAlertId = 3;
-const long timeBaseSec = 1000;
+sp<UidMap> uidMap = new UidMap();
+sp<StatsPullerManager> pullerManager = new StatsPullerManager();
+sp<AlarmMonitor> anomalyAlarmMonitor;
+sp<AlarmMonitor> periodicAlarmMonitor;
+unordered_map<int, vector<int>> allTagIdsToMatchersMap;
+vector<sp<AtomMatchingTracker>> allAtomMatchingTrackers;
+unordered_map<int64_t, int> atomMatchingTrackerMap;
+vector<sp<ConditionTracker>> allConditionTrackers;
+unordered_map<int64_t, int> conditionTrackerMap;
+vector<sp<MetricProducer>> allMetricProducers;
+unordered_map<int64_t, int> metricProducerMap;
+vector<sp<AnomalyTracker>> allAnomalyTrackers;
+unordered_map<int64_t, int> alertTrackerMap;
+vector<sp<AlarmTracker>> allAlarmTrackers;
+unordered_map<int, vector<int>> conditionToMetricMap;
+unordered_map<int, vector<int>> trackerToMetricMap;
+unordered_map<int, vector<int>> trackerToConditionMap;
+unordered_map<int, vector<int>> activationAtomTrackerToMetricMap;
+unordered_map<int, vector<int>> deactivationAtomTrackerToMetricMap;
+vector<int> metricsWithActivation;
+map<int64_t, uint64_t> stateProtoHashes;
+set<int64_t> noReportMetricIds;
+
+optional<InvalidConfigReason> initConfig(const StatsdConfig& config) {
+ // initStatsdConfig returns nullopt if config is valid
+ return initStatsdConfig(
+ kConfigKey, config, uidMap, pullerManager, anomalyAlarmMonitor, periodicAlarmMonitor,
+ timeBaseSec, timeBaseSec, allTagIdsToMatchersMap, allAtomMatchingTrackers,
+ atomMatchingTrackerMap, allConditionTrackers, conditionTrackerMap, allMetricProducers,
+ metricProducerMap, allAnomalyTrackers, allAlarmTrackers, conditionToMetricMap,
+ trackerToMetricMap, trackerToConditionMap, activationAtomTrackerToMetricMap,
+ deactivationAtomTrackerToMetricMap, alertTrackerMap, metricsWithActivation,
+ stateProtoHashes, noReportMetricIds);
+}
StatsdConfig buildGoodConfig() {
StatsdConfig config;
@@ -63,7 +97,7 @@
eventMatcher->set_id(StringToId("SCREEN_IS_ON"));
SimpleAtomMatcher* simpleAtomMatcher = eventMatcher->mutable_simple_atom_matcher();
- simpleAtomMatcher->set_atom_id(2 /*SCREEN_STATE_CHANGE*/);
+ simpleAtomMatcher->set_atom_id(SCREEN_STATE_ATOM_ID);
simpleAtomMatcher->add_field_value_matcher()->set_field(
1 /*SCREEN_STATE_CHANGE__DISPLAY_STATE*/);
simpleAtomMatcher->mutable_field_value_matcher(0)->set_eq_int(
@@ -73,7 +107,7 @@
eventMatcher->set_id(StringToId("SCREEN_IS_OFF"));
simpleAtomMatcher = eventMatcher->mutable_simple_atom_matcher();
- simpleAtomMatcher->set_atom_id(2 /*SCREEN_STATE_CHANGE*/);
+ simpleAtomMatcher->set_atom_id(SCREEN_STATE_ATOM_ID);
simpleAtomMatcher->add_field_value_matcher()->set_field(
1 /*SCREEN_STATE_CHANGE__DISPLAY_STATE*/);
simpleAtomMatcher->mutable_field_value_matcher(0)->set_eq_int(
@@ -91,7 +125,7 @@
metric->set_id(3);
metric->set_what(StringToId("SCREEN_IS_ON"));
metric->set_bucket(ONE_MINUTE);
- metric->mutable_dimensions_in_what()->set_field(2 /*SCREEN_STATE_CHANGE*/);
+ metric->mutable_dimensions_in_what()->set_field(SCREEN_STATE_ATOM_ID);
metric->mutable_dimensions_in_what()->add_child()->set_field(1);
config.add_no_report_metric(3);
@@ -113,7 +147,7 @@
eventMatcher->set_id(StringToId("SCREEN_IS_ON"));
SimpleAtomMatcher* simpleAtomMatcher = eventMatcher->mutable_simple_atom_matcher();
- simpleAtomMatcher->set_atom_id(2 /*SCREEN_STATE_CHANGE*/);
+ simpleAtomMatcher->set_atom_id(SCREEN_STATE_ATOM_ID);
simpleAtomMatcher->add_field_value_matcher()->set_field(
1 /*SCREEN_STATE_CHANGE__DISPLAY_STATE*/);
simpleAtomMatcher->mutable_field_value_matcher(0)->set_eq_int(
@@ -135,14 +169,13 @@
StatsdConfig config;
config.set_id(12345);
- AtomMatcher* eventMatcher = config.add_atom_matcher();
- eventMatcher->set_id(StringToId("SCREEN_IS_ON"));
+ *config.add_atom_matcher() = CreateScreenTurnedOnAtomMatcher();
CountMetric* metric = config.add_count_metric();
metric->set_id(3);
- metric->set_what(StringToId("SCREEN_IS_ON"));
+ metric->set_what(StringToId("ScreenTurnedOn"));
metric->set_bucket(ONE_MINUTE);
- metric->mutable_dimensions_in_what()->set_field(2 /*SCREEN_STATE_CHANGE*/);
+ metric->mutable_dimensions_in_what()->set_field(SCREEN_STATE_ATOM_ID);
metric->mutable_dimensions_in_what()->add_child()->set_field(1);
auto alert = config.add_alert();
@@ -162,7 +195,7 @@
eventMatcher->set_id(StringToId("SCREEN_IS_ON"));
SimpleAtomMatcher* simpleAtomMatcher = eventMatcher->mutable_simple_atom_matcher();
- simpleAtomMatcher->set_atom_id(2 /*SCREEN_STATE_CHANGE*/);
+ simpleAtomMatcher->set_atom_id(SCREEN_STATE_ATOM_ID);
simpleAtomMatcher->add_field_value_matcher()->set_field(
1 /*SCREEN_STATE_CHANGE__DISPLAY_STATE*/);
simpleAtomMatcher->mutable_field_value_matcher(0)->set_eq_int(
@@ -246,7 +279,7 @@
eventMatcher->set_id(StringToId("SCREEN_IS_ON"));
SimpleAtomMatcher* simpleAtomMatcher = eventMatcher->mutable_simple_atom_matcher();
- simpleAtomMatcher->set_atom_id(2 /*SCREEN_STATE_CHANGE*/);
+ simpleAtomMatcher->set_atom_id(SCREEN_STATE_ATOM_ID);
simpleAtomMatcher->add_field_value_matcher()->set_field(
1 /*SCREEN_STATE_CHANGE__DISPLAY_STATE*/);
simpleAtomMatcher->mutable_field_value_matcher(0)->set_eq_int(
@@ -256,7 +289,7 @@
eventMatcher->set_id(StringToId("SCREEN_IS_OFF"));
simpleAtomMatcher = eventMatcher->mutable_simple_atom_matcher();
- simpleAtomMatcher->set_atom_id(2 /*SCREEN_STATE_CHANGE*/);
+ simpleAtomMatcher->set_atom_id(SCREEN_STATE_ATOM_ID);
simpleAtomMatcher->add_field_value_matcher()->set_field(
1 /*SCREEN_STATE_CHANGE__DISPLAY_STATE*/);
simpleAtomMatcher->mutable_field_value_matcher(0)->set_eq_int(
@@ -378,39 +411,34 @@
}
} // anonymous namespace
-TEST(MetricsManagerTest, TestInitialConditions) {
- sp<UidMap> uidMap = new UidMap();
- sp<StatsPullerManager> pullerManager = new StatsPullerManager();
- sp<AlarmMonitor> anomalyAlarmMonitor;
- sp<AlarmMonitor> periodicAlarmMonitor;
- StatsdConfig config = buildConfigWithDifferentPredicates();
- set<int> allTagIds;
- vector<sp<AtomMatchingTracker>> allAtomMatchingTrackers;
- unordered_map<int64_t, int> atomMatchingTrackerMap;
- vector<sp<ConditionTracker>> allConditionTrackers;
- unordered_map<int64_t, int> conditionTrackerMap;
- vector<sp<MetricProducer>> allMetricProducers;
- unordered_map<int64_t, int> metricProducerMap;
- std::vector<sp<AnomalyTracker>> allAnomalyTrackers;
- std::vector<sp<AlarmTracker>> allAlarmTrackers;
- unordered_map<int, std::vector<int>> conditionToMetricMap;
- unordered_map<int, std::vector<int>> trackerToMetricMap;
- unordered_map<int, std::vector<int>> trackerToConditionMap;
- unordered_map<int, std::vector<int>> activationAtomTrackerToMetricMap;
- unordered_map<int, std::vector<int>> deactivationAtomTrackerToMetricMap;
- unordered_map<int64_t, int> alertTrackerMap;
- vector<int> metricsWithActivation;
- map<int64_t, uint64_t> stateProtoHashes;
- std::set<int64_t> noReportMetricIds;
+class MetricsManagerUtilTest : public ::testing::Test {
+public:
+ void SetUp() override {
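+ // Clear the file-scope containers shared by initConfig() so each test starts from a clean state.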
+ allTagIdsToMatchersMap.clear();
+ allAtomMatchingTrackers.clear();
+ atomMatchingTrackerMap.clear();
+ allConditionTrackers.clear();
+ conditionTrackerMap.clear();
+ allMetricProducers.clear();
+ metricProducerMap.clear();
+ allAnomalyTrackers.clear();
+ allAlarmTrackers.clear();
+ conditionToMetricMap.clear();
+ trackerToMetricMap.clear();
+ trackerToConditionMap.clear();
+ activationAtomTrackerToMetricMap.clear();
+ deactivationAtomTrackerToMetricMap.clear();
+ alertTrackerMap.clear();
+ metricsWithActivation.clear();
+ stateProtoHashes.clear();
+ noReportMetricIds.clear();
+ StateManager::getInstance().clear();
+ }
+};
- EXPECT_TRUE(initStatsdConfig(
- kConfigKey, config, uidMap, pullerManager, anomalyAlarmMonitor, periodicAlarmMonitor,
- timeBaseSec, timeBaseSec, allTagIds, allAtomMatchingTrackers, atomMatchingTrackerMap,
- allConditionTrackers, conditionTrackerMap, allMetricProducers, metricProducerMap,
- allAnomalyTrackers, allAlarmTrackers, conditionToMetricMap, trackerToMetricMap,
- trackerToConditionMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, alertTrackerMap, metricsWithActivation,
- stateProtoHashes, noReportMetricIds));
+TEST_F(MetricsManagerUtilTest, TestInitialConditions) {
+ // initConfig returns nullopt if config is valid
+ EXPECT_EQ(initConfig(buildConfigWithDifferentPredicates()), nullopt);
ASSERT_EQ(4u, allMetricProducers.size());
ASSERT_EQ(5u, allConditionTrackers.size());
@@ -429,41 +457,17 @@
EXPECT_EQ(ConditionState::kUnknown, allMetricProducers[1]->mCondition);
EXPECT_EQ(ConditionState::kFalse, allMetricProducers[2]->mCondition);
EXPECT_EQ(ConditionState::kUnknown, allMetricProducers[3]->mCondition);
+
+ EXPECT_EQ(allTagIdsToMatchersMap.size(), 3);
+ EXPECT_EQ(allTagIdsToMatchersMap[SCREEN_STATE_ATOM_ID].size(), 2);
+ EXPECT_EQ(allTagIdsToMatchersMap[util::PLUGGED_STATE_CHANGED].size(), 2);
+ EXPECT_EQ(allTagIdsToMatchersMap[util::SUBSYSTEM_SLEEP_STATE].size(), 1);
}
-TEST(MetricsManagerTest, TestGoodConfig) {
- sp<UidMap> uidMap = new UidMap();
- sp<StatsPullerManager> pullerManager = new StatsPullerManager();
- sp<AlarmMonitor> anomalyAlarmMonitor;
- sp<AlarmMonitor> periodicAlarmMonitor;
+TEST_F(MetricsManagerUtilTest, TestGoodConfig) {
StatsdConfig config = buildGoodConfig();
- set<int> allTagIds;
- vector<sp<AtomMatchingTracker>> allAtomMatchingTrackers;
- unordered_map<int64_t, int> atomMatchingTrackerMap;
- vector<sp<ConditionTracker>> allConditionTrackers;
- unordered_map<int64_t, int> conditionTrackerMap;
- vector<sp<MetricProducer>> allMetricProducers;
- unordered_map<int64_t, int> metricProducerMap;
- std::vector<sp<AnomalyTracker>> allAnomalyTrackers;
- std::vector<sp<AlarmTracker>> allAlarmTrackers;
- unordered_map<int, std::vector<int>> conditionToMetricMap;
- unordered_map<int, std::vector<int>> trackerToMetricMap;
- unordered_map<int, std::vector<int>> trackerToConditionMap;
- unordered_map<int, std::vector<int>> activationAtomTrackerToMetricMap;
- unordered_map<int, std::vector<int>> deactivationAtomTrackerToMetricMap;
- unordered_map<int64_t, int> alertTrackerMap;
- vector<int> metricsWithActivation;
- map<int64_t, uint64_t> stateProtoHashes;
- std::set<int64_t> noReportMetricIds;
-
- EXPECT_TRUE(initStatsdConfig(
- kConfigKey, config, uidMap, pullerManager, anomalyAlarmMonitor, periodicAlarmMonitor,
- timeBaseSec, timeBaseSec, allTagIds, allAtomMatchingTrackers, atomMatchingTrackerMap,
- allConditionTrackers, conditionTrackerMap, allMetricProducers, metricProducerMap,
- allAnomalyTrackers, allAlarmTrackers, conditionToMetricMap, trackerToMetricMap,
- trackerToConditionMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, alertTrackerMap, metricsWithActivation,
- stateProtoHashes, noReportMetricIds));
+ // initConfig returns nullopt if config is valid
+ EXPECT_EQ(initConfig(config), nullopt);
ASSERT_EQ(1u, allMetricProducers.size());
EXPECT_THAT(metricProducerMap, UnorderedElementsAre(Pair(config.count_metric(0).id(), 0)));
ASSERT_EQ(1u, allAnomalyTrackers.size());
@@ -473,247 +477,726 @@
EXPECT_EQ(alertTrackerMap.find(kAlertId)->second, 0);
}
-TEST(MetricsManagerTest, TestDimensionMetricsWithMultiTags) {
- sp<UidMap> uidMap = new UidMap();
- sp<StatsPullerManager> pullerManager = new StatsPullerManager();
- sp<AlarmMonitor> anomalyAlarmMonitor;
- sp<AlarmMonitor> periodicAlarmMonitor;
- StatsdConfig config = buildDimensionMetricsWithMultiTags();
- set<int> allTagIds;
- vector<sp<AtomMatchingTracker>> allAtomMatchingTrackers;
- unordered_map<int64_t, int> atomMatchingTrackerMap;
- vector<sp<ConditionTracker>> allConditionTrackers;
- unordered_map<int64_t, int> conditionTrackerMap;
- vector<sp<MetricProducer>> allMetricProducers;
- unordered_map<int64_t, int> metricProducerMap;
- std::vector<sp<AnomalyTracker>> allAnomalyTrackers;
- std::vector<sp<AlarmTracker>> allAlarmTrackers;
- unordered_map<int, std::vector<int>> conditionToMetricMap;
- unordered_map<int, std::vector<int>> trackerToMetricMap;
- unordered_map<int, std::vector<int>> trackerToConditionMap;
- unordered_map<int, std::vector<int>> activationAtomTrackerToMetricMap;
- unordered_map<int, std::vector<int>> deactivationAtomTrackerToMetricMap;
- unordered_map<int64_t, int> alertTrackerMap;
- vector<int> metricsWithActivation;
- map<int64_t, uint64_t> stateProtoHashes;
- std::set<int64_t> noReportMetricIds;
-
- EXPECT_FALSE(initStatsdConfig(
- kConfigKey, config, uidMap, pullerManager, anomalyAlarmMonitor, periodicAlarmMonitor,
- timeBaseSec, timeBaseSec, allTagIds, allAtomMatchingTrackers, atomMatchingTrackerMap,
- allConditionTrackers, conditionTrackerMap, allMetricProducers, metricProducerMap,
- allAnomalyTrackers, allAlarmTrackers, conditionToMetricMap, trackerToMetricMap,
- trackerToConditionMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, alertTrackerMap, metricsWithActivation,
- stateProtoHashes, noReportMetricIds));
+TEST_F(MetricsManagerUtilTest, TestDimensionMetricsWithMultiTags) {
+ EXPECT_EQ(initConfig(buildDimensionMetricsWithMultiTags()),
+ createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_METRIC_MATCHER_MORE_THAN_ONE_ATOM, /*metric id=*/3,
+ StringToId("BATTERY_LOW")));
}
-TEST(MetricsManagerTest, TestCircleLogMatcherDependency) {
- sp<UidMap> uidMap = new UidMap();
- sp<StatsPullerManager> pullerManager = new StatsPullerManager();
- sp<AlarmMonitor> anomalyAlarmMonitor;
- sp<AlarmMonitor> periodicAlarmMonitor;
- StatsdConfig config = buildCircleMatchers();
- set<int> allTagIds;
- vector<sp<AtomMatchingTracker>> allAtomMatchingTrackers;
- unordered_map<int64_t, int> atomMatchingTrackerMap;
- vector<sp<ConditionTracker>> allConditionTrackers;
- unordered_map<int64_t, int> conditionTrackerMap;
- vector<sp<MetricProducer>> allMetricProducers;
- unordered_map<int64_t, int> metricProducerMap;
- std::vector<sp<AnomalyTracker>> allAnomalyTrackers;
- std::vector<sp<AlarmTracker>> allAlarmTrackers;
- unordered_map<int, std::vector<int>> conditionToMetricMap;
- unordered_map<int, std::vector<int>> trackerToMetricMap;
- unordered_map<int, std::vector<int>> trackerToConditionMap;
- unordered_map<int, std::vector<int>> activationAtomTrackerToMetricMap;
- unordered_map<int, std::vector<int>> deactivationAtomTrackerToMetricMap;
- unordered_map<int64_t, int> alertTrackerMap;
- vector<int> metricsWithActivation;
- map<int64_t, uint64_t> stateProtoHashes;
- std::set<int64_t> noReportMetricIds;
+TEST_F(MetricsManagerUtilTest, TestCircleLogMatcherDependency) {
+ optional<InvalidConfigReason> expectedInvalidConfigReason =
+ createInvalidConfigReasonWithMatcher(INVALID_CONFIG_REASON_MATCHER_CYCLE,
+ StringToId("SCREEN_ON_OR_OFF"));
+ expectedInvalidConfigReason->matcherIds.push_back(StringToId("SCREEN_ON_OR_OFF"));
- EXPECT_FALSE(initStatsdConfig(
- kConfigKey, config, uidMap, pullerManager, anomalyAlarmMonitor, periodicAlarmMonitor,
- timeBaseSec, timeBaseSec, allTagIds, allAtomMatchingTrackers, atomMatchingTrackerMap,
- allConditionTrackers, conditionTrackerMap, allMetricProducers, metricProducerMap,
- allAnomalyTrackers, allAlarmTrackers, conditionToMetricMap, trackerToMetricMap,
- trackerToConditionMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, alertTrackerMap, metricsWithActivation,
- stateProtoHashes, noReportMetricIds));
+ EXPECT_EQ(initConfig(buildCircleMatchers()), expectedInvalidConfigReason);
}
-TEST(MetricsManagerTest, TestMissingMatchers) {
- sp<UidMap> uidMap = new UidMap();
- sp<StatsPullerManager> pullerManager = new StatsPullerManager();
- sp<AlarmMonitor> anomalyAlarmMonitor;
- sp<AlarmMonitor> periodicAlarmMonitor;
- StatsdConfig config = buildMissingMatchers();
- set<int> allTagIds;
- vector<sp<AtomMatchingTracker>> allAtomMatchingTrackers;
- unordered_map<int64_t, int> atomMatchingTrackerMap;
- vector<sp<ConditionTracker>> allConditionTrackers;
- unordered_map<int64_t, int> conditionTrackerMap;
- vector<sp<MetricProducer>> allMetricProducers;
- unordered_map<int64_t, int> metricProducerMap;
- std::vector<sp<AnomalyTracker>> allAnomalyTrackers;
- std::vector<sp<AlarmTracker>> allAlarmTrackers;
- unordered_map<int, std::vector<int>> conditionToMetricMap;
- unordered_map<int, std::vector<int>> trackerToMetricMap;
- unordered_map<int, std::vector<int>> trackerToConditionMap;
- unordered_map<int, std::vector<int>> activationAtomTrackerToMetricMap;
- unordered_map<int, std::vector<int>> deactivationAtomTrackerToMetricMap;
- unordered_map<int64_t, int> alertTrackerMap;
- vector<int> metricsWithActivation;
- map<int64_t, uint64_t> stateProtoHashes;
- std::set<int64_t> noReportMetricIds;
- EXPECT_FALSE(initStatsdConfig(
- kConfigKey, config, uidMap, pullerManager, anomalyAlarmMonitor, periodicAlarmMonitor,
- timeBaseSec, timeBaseSec, allTagIds, allAtomMatchingTrackers, atomMatchingTrackerMap,
- allConditionTrackers, conditionTrackerMap, allMetricProducers, metricProducerMap,
- allAnomalyTrackers, allAlarmTrackers, conditionToMetricMap, trackerToMetricMap,
- trackerToConditionMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, alertTrackerMap, metricsWithActivation,
- stateProtoHashes, noReportMetricIds));
+TEST_F(MetricsManagerUtilTest, TestMissingMatchers) {
+ optional<InvalidConfigReason> expectedInvalidConfigReason =
+ createInvalidConfigReasonWithMatcher(INVALID_CONFIG_REASON_MATCHER_CHILD_NOT_FOUND,
+ StringToId("SCREEN_ON_OR_OFF"));
+ expectedInvalidConfigReason->matcherIds.push_back(StringToId("ABC"));
+
+ EXPECT_EQ(initConfig(buildMissingMatchers()), expectedInvalidConfigReason);
}
-TEST(MetricsManagerTest, TestMissingPredicate) {
- sp<UidMap> uidMap = new UidMap();
- sp<StatsPullerManager> pullerManager = new StatsPullerManager();
- sp<AlarmMonitor> anomalyAlarmMonitor;
- sp<AlarmMonitor> periodicAlarmMonitor;
- StatsdConfig config = buildMissingPredicate();
- set<int> allTagIds;
- vector<sp<AtomMatchingTracker>> allAtomMatchingTrackers;
- unordered_map<int64_t, int> atomMatchingTrackerMap;
- vector<sp<ConditionTracker>> allConditionTrackers;
- unordered_map<int64_t, int> conditionTrackerMap;
- vector<sp<MetricProducer>> allMetricProducers;
- unordered_map<int64_t, int> metricProducerMap;
- std::vector<sp<AnomalyTracker>> allAnomalyTrackers;
- std::vector<sp<AlarmTracker>> allAlarmTrackers;
- unordered_map<int, std::vector<int>> conditionToMetricMap;
- unordered_map<int, std::vector<int>> trackerToMetricMap;
- unordered_map<int, std::vector<int>> trackerToConditionMap;
- unordered_map<int, std::vector<int>> activationAtomTrackerToMetricMap;
- unordered_map<int, std::vector<int>> deactivationAtomTrackerToMetricMap;
- unordered_map<int64_t, int> alertTrackerMap;
- vector<int> metricsWithActivation;
- map<int64_t, uint64_t> stateProtoHashes;
- std::set<int64_t> noReportMetricIds;
- EXPECT_FALSE(initStatsdConfig(
- kConfigKey, config, uidMap, pullerManager, anomalyAlarmMonitor, periodicAlarmMonitor,
- timeBaseSec, timeBaseSec, allTagIds, allAtomMatchingTrackers, atomMatchingTrackerMap,
- allConditionTrackers, conditionTrackerMap, allMetricProducers, metricProducerMap,
- allAnomalyTrackers, allAlarmTrackers, conditionToMetricMap, trackerToMetricMap,
- trackerToConditionMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, alertTrackerMap, metricsWithActivation,
- stateProtoHashes, noReportMetricIds));
+TEST_F(MetricsManagerUtilTest, TestMissingPredicate) {
+ EXPECT_EQ(
+ initConfig(buildMissingPredicate()),
+ createInvalidConfigReasonWithPredicate(INVALID_CONFIG_REASON_METRIC_CONDITION_NOT_FOUND,
+ /*metric id=*/3, StringToId("SOME_CONDITION")));
}
-TEST(MetricsManagerTest, TestCirclePredicateDependency) {
- sp<UidMap> uidMap = new UidMap();
- sp<StatsPullerManager> pullerManager = new StatsPullerManager();
- sp<AlarmMonitor> anomalyAlarmMonitor;
- sp<AlarmMonitor> periodicAlarmMonitor;
- StatsdConfig config = buildCirclePredicates();
- set<int> allTagIds;
- vector<sp<AtomMatchingTracker>> allAtomMatchingTrackers;
- unordered_map<int64_t, int> atomMatchingTrackerMap;
- vector<sp<ConditionTracker>> allConditionTrackers;
- unordered_map<int64_t, int> conditionTrackerMap;
- vector<sp<MetricProducer>> allMetricProducers;
- unordered_map<int64_t, int> metricProducerMap;
- std::vector<sp<AnomalyTracker>> allAnomalyTrackers;
- std::vector<sp<AlarmTracker>> allAlarmTrackers;
- unordered_map<int, std::vector<int>> conditionToMetricMap;
- unordered_map<int, std::vector<int>> trackerToMetricMap;
- unordered_map<int, std::vector<int>> trackerToConditionMap;
- unordered_map<int, std::vector<int>> activationAtomTrackerToMetricMap;
- unordered_map<int, std::vector<int>> deactivationAtomTrackerToMetricMap;
- unordered_map<int64_t, int> alertTrackerMap;
- vector<int> metricsWithActivation;
- map<int64_t, uint64_t> stateProtoHashes;
- std::set<int64_t> noReportMetricIds;
+TEST_F(MetricsManagerUtilTest, TestCirclePredicateDependency) {
+ optional<InvalidConfigReason> expectedInvalidConfigReason =
+ createInvalidConfigReasonWithPredicate(INVALID_CONFIG_REASON_CONDITION_CYCLE,
+ StringToId("SCREEN_IS_EITHER_ON_OFF"));
+ expectedInvalidConfigReason->conditionIds.push_back(StringToId("SCREEN_IS_EITHER_ON_OFF"));
- EXPECT_FALSE(initStatsdConfig(
- kConfigKey, config, uidMap, pullerManager, anomalyAlarmMonitor, periodicAlarmMonitor,
- timeBaseSec, timeBaseSec, allTagIds, allAtomMatchingTrackers, atomMatchingTrackerMap,
- allConditionTrackers, conditionTrackerMap, allMetricProducers, metricProducerMap,
- allAnomalyTrackers, allAlarmTrackers, conditionToMetricMap, trackerToMetricMap,
- trackerToConditionMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, alertTrackerMap, metricsWithActivation,
- stateProtoHashes, noReportMetricIds));
+ EXPECT_EQ(initConfig(buildCirclePredicates()), expectedInvalidConfigReason);
}
-TEST(MetricsManagerTest, testAlertWithUnknownMetric) {
- sp<UidMap> uidMap = new UidMap();
- sp<StatsPullerManager> pullerManager = new StatsPullerManager();
- sp<AlarmMonitor> anomalyAlarmMonitor;
- sp<AlarmMonitor> periodicAlarmMonitor;
- StatsdConfig config = buildAlertWithUnknownMetric();
- set<int> allTagIds;
- vector<sp<AtomMatchingTracker>> allAtomMatchingTrackers;
- unordered_map<int64_t, int> atomMatchingTrackerMap;
- vector<sp<ConditionTracker>> allConditionTrackers;
- unordered_map<int64_t, int> conditionTrackerMap;
- vector<sp<MetricProducer>> allMetricProducers;
- unordered_map<int64_t, int> metricProducerMap;
- std::vector<sp<AnomalyTracker>> allAnomalyTrackers;
- std::vector<sp<AlarmTracker>> allAlarmTrackers;
- unordered_map<int, std::vector<int>> conditionToMetricMap;
- unordered_map<int, std::vector<int>> trackerToMetricMap;
- unordered_map<int, std::vector<int>> trackerToConditionMap;
- unordered_map<int, std::vector<int>> activationAtomTrackerToMetricMap;
- unordered_map<int, std::vector<int>> deactivationAtomTrackerToMetricMap;
- unordered_map<int64_t, int> alertTrackerMap;
- vector<int> metricsWithActivation;
- map<int64_t, uint64_t> stateProtoHashes;
- std::set<int64_t> noReportMetricIds;
-
- EXPECT_FALSE(initStatsdConfig(
- kConfigKey, config, uidMap, pullerManager, anomalyAlarmMonitor, periodicAlarmMonitor,
- timeBaseSec, timeBaseSec, allTagIds, allAtomMatchingTrackers, atomMatchingTrackerMap,
- allConditionTrackers, conditionTrackerMap, allMetricProducers, metricProducerMap,
- allAnomalyTrackers, allAlarmTrackers, conditionToMetricMap, trackerToMetricMap,
- trackerToConditionMap, activationAtomTrackerToMetricMap,
- deactivationAtomTrackerToMetricMap, alertTrackerMap, metricsWithActivation,
- stateProtoHashes, noReportMetricIds));
+TEST_F(MetricsManagerUtilTest, TestAlertWithUnknownMetric) {
+ EXPECT_EQ(initConfig(buildAlertWithUnknownMetric()),
+ createInvalidConfigReasonWithAlert(INVALID_CONFIG_REASON_ALERT_METRIC_NOT_FOUND,
+ /*metric id=*/2, /*alert id=*/3));
}
-TEST(MetricsManagerTest, TestCreateAtomMatchingTrackerInvalidMatcher) {
+TEST_F(MetricsManagerUtilTest, TestMetricWithMultipleActivations) {
+ StatsdConfig config;
+ int64_t metricId = 1;
+ auto metric_activation1 = config.add_metric_activation();
+ metric_activation1->set_metric_id(metricId);
+ metric_activation1->set_activation_type(ACTIVATE_IMMEDIATELY);
+ auto metric_activation2 = config.add_metric_activation();
+ metric_activation2->set_metric_id(metricId);
+ metric_activation2->set_activation_type(ACTIVATE_IMMEDIATELY);
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_HAS_MULTIPLE_ACTIVATIONS, metricId));
+}
+
+TEST_F(MetricsManagerUtilTest, TestCountMetricMissingIdOrWhat) {
+ StatsdConfig config;
+ int64_t metricId = 1;
+ CountMetric* metric = config.add_count_metric();
+ metric->set_id(metricId);
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_MISSING_ID_OR_WHAT, metricId));
+}
+
+TEST_F(MetricsManagerUtilTest, TestCountMetricConditionlinkNoCondition) {
+ StatsdConfig config;
+ CountMetric* metric = config.add_count_metric();
+ *metric = createCountMetric(/*name=*/"Count", /*what=*/StringToId("ScreenTurnedOn"),
+ /*condition=*/nullopt, /*states=*/{});
+ *config.add_atom_matcher() = CreateScreenTurnedOnAtomMatcher();
+
+ auto link = metric->add_links();
+ link->set_condition(1);
+
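+ // The metric declares a condition link without declaring any condition, which is invalid.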
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_CONDITIONLINK_NO_CONDITION,
+ StringToId("Count")));
+}
+
+TEST_F(MetricsManagerUtilTest, TestDurationMetricMissingIdOrWhat) {
+ StatsdConfig config;
+ int64_t metricId = 1;
+ DurationMetric* metric = config.add_duration_metric();
+ metric->set_id(metricId);
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_MISSING_ID_OR_WHAT, metricId));
+}
+
+TEST_F(MetricsManagerUtilTest, TestDurationMetricConditionlinkNoCondition) {
+ StatsdConfig config;
+ DurationMetric* metric = config.add_duration_metric();
+ *metric = createDurationMetric(/*name=*/"Duration", /*what=*/StringToId("ScreenIsOn"),
+ /*condition=*/nullopt, /*states=*/{});
+ *config.add_atom_matcher() = CreateScreenTurnedOnAtomMatcher();
+ *config.add_atom_matcher() = CreateScreenTurnedOffAtomMatcher();
+ *config.add_predicate() = CreateScreenIsOnPredicate();
+
+ auto link = metric->add_links();
+ link->set_condition(1);
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_CONDITIONLINK_NO_CONDITION,
+ StringToId("Duration")));
+}
+
+TEST_F(MetricsManagerUtilTest, TestGaugeMetricMissingIdOrWhat) {
+ StatsdConfig config;
+ int64_t metricId = 1;
+ GaugeMetric* metric = config.add_gauge_metric();
+ metric->set_id(metricId);
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_MISSING_ID_OR_WHAT, metricId));
+}
+
+TEST_F(MetricsManagerUtilTest, TestGaugeMetricConditionlinkNoCondition) {
+ StatsdConfig config;
+ GaugeMetric* metric = config.add_gauge_metric();
+ *metric = createGaugeMetric(/*name=*/"Gauge", /*what=*/StringToId("ScreenTurnedOn"),
+ /*samplingType=*/GaugeMetric_SamplingType_FIRST_N_SAMPLES,
+ /*condition=*/nullopt, /*triggerEvent=*/nullopt);
+ *config.add_atom_matcher() = CreateScreenTurnedOnAtomMatcher();
+
+ auto link = metric->add_links();
+ link->set_condition(1);
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_CONDITIONLINK_NO_CONDITION,
+ StringToId("Gauge")));
+}
+
+TEST_F(MetricsManagerUtilTest, TestEventMetricMissingIdOrWhat) {
+ StatsdConfig config;
+ int64_t metricId = 1;
+ EventMetric* metric = config.add_event_metric();
+ metric->set_id(metricId);
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_MISSING_ID_OR_WHAT, metricId));
+}
+
+TEST_F(MetricsManagerUtilTest, TestEventMetricConditionlinkNoCondition) {
+ StatsdConfig config;
+ EventMetric* metric = config.add_event_metric();
+ *metric = createEventMetric(/*name=*/"Event", /*what=*/StringToId("ScreenTurnedOn"),
+ /*condition=*/nullopt);
+ *config.add_atom_matcher() = CreateScreenTurnedOnAtomMatcher();
+
+ auto link = metric->add_links();
+ link->set_condition(1);
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_CONDITIONLINK_NO_CONDITION,
+ StringToId("Event")));
+}
+
+TEST_F(MetricsManagerUtilTest, TestNumericValueMetricMissingIdOrWhat) {
+ StatsdConfig config;
+ int64_t metricId = 1;
+ ValueMetric* metric = config.add_value_metric();
+ metric->set_id(metricId);
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_MISSING_ID_OR_WHAT, metricId));
+}
+
+TEST_F(MetricsManagerUtilTest, TestNumericValueMetricConditionlinkNoCondition) {
+ StatsdConfig config;
+ ValueMetric* metric = config.add_value_metric();
+ *metric = createValueMetric(/*name=*/"NumericValue", /*what=*/CreateScreenTurnedOnAtomMatcher(),
+ /*valueField=*/2, /*condition=*/nullopt, /*states=*/{});
+ *config.add_atom_matcher() = CreateScreenTurnedOnAtomMatcher();
+
+ auto link = metric->add_links();
+ link->set_condition(1);
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_CONDITIONLINK_NO_CONDITION,
+ StringToId("NumericValue")));
+}
+
+TEST_F(MetricsManagerUtilTest, TestKllMetricMissingIdOrWhat) {
+ StatsdConfig config;
+ int64_t metricId = 1;
+ KllMetric* metric = config.add_kll_metric();
+ metric->set_id(metricId);
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_MISSING_ID_OR_WHAT, metricId));
+}
+
+TEST_F(MetricsManagerUtilTest, TestKllMetricConditionlinkNoCondition) {
+ StatsdConfig config;
+ KllMetric* metric = config.add_kll_metric();
+ *metric = createKllMetric(/*name=*/"Kll", /*what=*/CreateScreenTurnedOnAtomMatcher(),
+ /*valueField=*/2, /*condition=*/nullopt);
+ *config.add_atom_matcher() = CreateScreenTurnedOnAtomMatcher();
+
+ auto link = metric->add_links();
+ link->set_condition(1);
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_CONDITIONLINK_NO_CONDITION,
+ StringToId("Kll")));
+}
+
+TEST_F(MetricsManagerUtilTest, TestMetricMatcherNotFound) {
+ StatsdConfig config;
+ *config.add_count_metric() =
+ createCountMetric(/*name=*/"Count", /*what=*/StringToId("SOME MATCHER"),
+ /*condition=*/nullopt, /*states=*/{});
+
+ EXPECT_EQ(initConfig(config), createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_METRIC_MATCHER_NOT_FOUND,
+ StringToId("Count"), StringToId("SOME MATCHER")));
+}
+
+TEST_F(MetricsManagerUtilTest, TestMetricConditionLinkNotFound) {
+ StatsdConfig config;
+ CountMetric* metric = config.add_count_metric();
+ *metric = createCountMetric(/*name=*/"Count", /*what=*/StringToId("ScreenTurnedOn"),
+ /*condition=*/StringToId("ScreenIsOn"), /*states=*/{});
+ *config.add_atom_matcher() = CreateScreenTurnedOnAtomMatcher();
+ *config.add_predicate() = CreateScreenIsOnPredicate();
+
+ auto link = metric->add_links();
+ link->set_condition(StringToId("SOME CONDITION"));
+
+ EXPECT_EQ(initConfig(config), createInvalidConfigReasonWithPredicate(
+ INVALID_CONFIG_REASON_METRIC_CONDITION_LINK_NOT_FOUND,
+ StringToId("Count"), StringToId("SOME CONDITION")));
+}
+
+TEST_F(MetricsManagerUtilTest, TestMetricStateNotFound) {
+ StatsdConfig config;
+ *config.add_count_metric() =
+ createCountMetric(/*name=*/"Count", /*what=*/StringToId("ScreenTurnedOn"),
+ /*condition=*/nullopt, /*states=*/{StringToId("SOME STATE")});
+ *config.add_atom_matcher() = CreateScreenTurnedOnAtomMatcher();
+
+ EXPECT_EQ(initConfig(config),
+ createInvalidConfigReasonWithState(INVALID_CONFIG_REASON_METRIC_STATE_NOT_FOUND,
+ StringToId("Count"), StringToId("SOME STATE")));
+}
+
+TEST_F(MetricsManagerUtilTest, TestMetricStatelinkNoState) {
+ StatsdConfig config;
+ CountMetric* metric = config.add_count_metric();
+ *metric = createCountMetric(/*name=*/"Count", /*what=*/StringToId("ScreenTurnedOn"),
+ /*condition=*/nullopt, /*states=*/{});
+ *config.add_atom_matcher() = CreateScreenTurnedOnAtomMatcher();
+
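+ // A state link is added, but the metric does not slice by any state, which is invalid.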
+ auto link = metric->add_state_link();
+ link->set_state_atom_id(2);
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_STATELINK_NO_STATE,
+ StringToId("Count")));
+}
+
+TEST_F(MetricsManagerUtilTest, TestMetricBadThreshold) {
+ StatsdConfig config;
+ CountMetric* metric = config.add_count_metric();
+ *metric = createCountMetric(/*name=*/"Count", /*what=*/StringToId("ScreenTurnedOn"),
+ /*condition=*/nullopt, /*states=*/{});
+ *config.add_atom_matcher() = CreateScreenTurnedOnAtomMatcher();
+
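+ // A float comparison threshold on a count metric is expected to be rejected as a bad threshold.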
+ metric->mutable_threshold()->set_lt_float(1.0);
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_BAD_THRESHOLD, StringToId("Count")));
+}
+
+TEST_F(MetricsManagerUtilTest, TestMetricActivationMatcherNotFound) {
+ StatsdConfig config;
+ *config.add_count_metric() =
+ createCountMetric(/*name=*/"Count", /*what=*/StringToId("ScreenTurnedOn"),
+ /*condition=*/nullopt, /*states=*/{});
+ *config.add_atom_matcher() = CreateScreenTurnedOnAtomMatcher();
+ auto metric_activation = config.add_metric_activation();
+ metric_activation->set_metric_id(StringToId("Count"));
+ metric_activation->set_activation_type(ACTIVATE_IMMEDIATELY);
+ auto event_activation = metric_activation->add_event_activation();
+
+ event_activation->set_atom_matcher_id(StringToId("SOME_MATCHER"));
+
+ EXPECT_EQ(initConfig(config), createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_METRIC_ACTIVATION_MATCHER_NOT_FOUND,
+ StringToId("Count"), StringToId("SOME_MATCHER")));
+}
+
+TEST_F(MetricsManagerUtilTest, TestMetricDeactivationMatcherNotFound) {
+ StatsdConfig config;
+ *config.add_count_metric() =
+ createCountMetric(/*name=*/"Count", /*what=*/StringToId("ScreenTurnedOn"),
+ /*condition=*/nullopt, /*states=*/{});
+ *config.add_atom_matcher() = CreateScreenTurnedOnAtomMatcher();
+ auto metric_activation = config.add_metric_activation();
+ metric_activation->set_metric_id(StringToId("Count"));
+ metric_activation->set_activation_type(ACTIVATE_IMMEDIATELY);
+ auto event_activation = metric_activation->add_event_activation();
+ event_activation->set_atom_matcher_id(StringToId("ScreenTurnedOn"));
+
+ event_activation->set_deactivation_atom_matcher_id(StringToId("SOME_MATCHER"));
+
+ EXPECT_EQ(initConfig(config),
+ createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_METRIC_DEACTIVATION_MATCHER_NOT_FOUND,
+ StringToId("Count"), StringToId("SOME_MATCHER")));
+}
+
+TEST_F(MetricsManagerUtilTest, TestMetricSlicedStateAtomAllowedFromAnyUid) {
+ StatsdConfig config;
+ CountMetric* metric = config.add_count_metric();
+ *metric = createCountMetric(/*name=*/"Count", /*what=*/StringToId("ScreenTurnedOn"),
+ /*condition=*/nullopt, /*states=*/{});
+ *config.add_atom_matcher() = CreateScreenTurnedOnAtomMatcher();
+
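+ // Allowing the sliced-state atom from any UID (whitelisting it) makes the config invalid.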
+ *config.add_state() = CreateScreenState();
+ metric->add_slice_by_state(StringToId("ScreenState"));
+ config.add_whitelisted_atom_ids(util::SCREEN_STATE_CHANGED);
+
+ EXPECT_EQ(
+ initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_SLICED_STATE_ATOM_ALLOWED_FROM_ANY_UID,
+ StringToId("Count")));
+}
+
+TEST_F(MetricsManagerUtilTest, TestDurationMetricWhatNotSimple) {
+ StatsdConfig config;
+ *config.add_duration_metric() =
+ createDurationMetric(/*name=*/"Duration", /*what=*/StringToId("ScreenIsEitherOnOff"),
+ /*condition=*/nullopt, /*states=*/{});
+ *config.add_predicate() = CreateScreenIsOnPredicate();
+ *config.add_predicate() = CreateScreenIsOffPredicate();
+
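+ // The what predicate is an OR combination, but duration metrics require a simple predicate.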
+ auto condition = config.add_predicate();
+ condition->set_id(StringToId("ScreenIsEitherOnOff"));
+ Predicate_Combination* combination = condition->mutable_combination();
+ combination->set_operation(LogicalOperation::OR);
+ combination->add_predicate(StringToId("ScreenIsOn"));
+ combination->add_predicate(StringToId("ScreenIsOff"));
+
+ EXPECT_EQ(initConfig(config),
+ createInvalidConfigReasonWithPredicate(
+ INVALID_CONFIG_REASON_DURATION_METRIC_WHAT_NOT_SIMPLE, StringToId("Duration"),
+ StringToId("ScreenIsEitherOnOff")));
+}
+
+TEST_F(MetricsManagerUtilTest, TestDurationMetricWhatNotFound) {
+ StatsdConfig config;
+ int64_t metricId = 1;
+ DurationMetric* metric = config.add_duration_metric();
+ metric->set_id(metricId);
+
+ metric->set_what(StringToId("SOME WHAT"));
+
+ EXPECT_EQ(initConfig(config), createInvalidConfigReasonWithPredicate(
+ INVALID_CONFIG_REASON_DURATION_METRIC_WHAT_NOT_FOUND,
+ metricId, StringToId("SOME WHAT")));
+}
+
+TEST_F(MetricsManagerUtilTest, TestDurationMetricMissingStart) {
+ StatsdConfig config;
+ *config.add_duration_metric() =
+ createDurationMetric(/*name=*/"Duration", /*what=*/StringToId("SCREEN_IS_ON"),
+ /*condition=*/nullopt, /*states=*/{});
+ auto condition = config.add_predicate();
+ condition->set_id(StringToId("SCREEN_IS_ON"));
+
+ SimplePredicate* simplePredicate = condition->mutable_simple_predicate();
+ simplePredicate->set_stop(StringToId("SCREEN_IS_OFF"));
+
+ EXPECT_EQ(initConfig(config), createInvalidConfigReasonWithPredicate(
+ INVALID_CONFIG_REASON_DURATION_METRIC_MISSING_START,
+ StringToId("Duration"), StringToId("SCREEN_IS_ON")));
+}
+
+TEST_F(MetricsManagerUtilTest, TestDurationMetricMaxSparseHasSliceByState) {
+ StatsdConfig config;
+ DurationMetric* metric = config.add_duration_metric();
+ *metric = createDurationMetric(/*name=*/"Duration", /*what=*/StringToId("ScreenIsOn"),
+ /*condition=*/nullopt, /*states=*/{});
+ *config.add_atom_matcher() = CreateScreenTurnedOnAtomMatcher();
+ *config.add_atom_matcher() = CreateScreenTurnedOffAtomMatcher();
+ *config.add_predicate() = CreateScreenIsOnPredicate();
+ *config.add_state() = CreateScreenState();
+
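+ // MAX_SPARSE aggregation cannot be combined with slice_by_state.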
+ metric->add_slice_by_state(StringToId("ScreenState"));
+ metric->set_aggregation_type(DurationMetric::MAX_SPARSE);
+
+ EXPECT_EQ(
+ initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_DURATION_METRIC_MAX_SPARSE_HAS_SLICE_BY_STATE,
+ StringToId("Duration")));
+}
+
+TEST_F(MetricsManagerUtilTest, TestValueMetricMissingValueField) {
+ StatsdConfig config;
+ int64_t metricId = 1;
+ ValueMetric* metric = config.add_value_metric();
+ metric->set_id(metricId);
+ metric->set_what(1);
+
+ EXPECT_EQ(
+ initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_VALUE_METRIC_MISSING_VALUE_FIELD, metricId));
+}
+
+TEST_F(MetricsManagerUtilTest, TestValueMetricValueFieldHasPositionAll) {
+ StatsdConfig config;
+ int64_t metricId = 1;
+ ValueMetric* metric = config.add_value_metric();
+ metric->set_id(metricId);
+ metric->set_what(1);
+
+ metric->mutable_value_field()->set_position(ALL);
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_VALUE_METRIC_VALUE_FIELD_HAS_POSITION_ALL,
+ metricId));
+}
+
+TEST_F(MetricsManagerUtilTest, TestValueMetricHasIncorrectValueField) {
+ StatsdConfig config;
+ int64_t metricId = 1;
+ ValueMetric* metric = config.add_value_metric();
+ metric->set_id(metricId);
+ metric->set_what(1);
+
+ metric->mutable_value_field()->set_position(ANY);
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_VALUE_METRIC_HAS_INCORRECT_VALUE_FIELD,
+ metricId));
+}
+
+TEST_F(MetricsManagerUtilTest, TestKllMetricMissingKllField) {
+ StatsdConfig config;
+ int64_t metricId = 1;
+ KllMetric* metric = config.add_kll_metric();
+ metric->set_id(metricId);
+ metric->set_what(1);
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_KLL_METRIC_MISSING_KLL_FIELD, metricId));
+}
+
+TEST_F(MetricsManagerUtilTest, TestKllMetricKllFieldHasPositionAll) {
+ StatsdConfig config;
+ int64_t metricId = 1;
+ KllMetric* metric = config.add_kll_metric();
+ metric->set_id(metricId);
+ metric->set_what(1);
+
+ metric->mutable_kll_field()->set_position(ALL);
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_KLL_METRIC_KLL_FIELD_HAS_POSITION_ALL,
+ metricId));
+}
+
+TEST_F(MetricsManagerUtilTest, TestKllMetricHasIncorrectKllField) {
+ StatsdConfig config;
+ int64_t metricId = 1;
+ KllMetric* metric = config.add_kll_metric();
+ metric->set_id(metricId);
+ metric->set_what(1);
+
+ metric->mutable_kll_field()->set_position(ANY);
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_KLL_METRIC_HAS_INCORRECT_KLL_FIELD,
+ metricId));
+}
+
+TEST_F(MetricsManagerUtilTest, TestGaugeMetricIncorrectFieldFilter) {
+ StatsdConfig config;
+ int64_t metricId = 1;
+ GaugeMetric* metric = config.add_gauge_metric();
+ metric->set_id(metricId);
+ metric->set_what(1);
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_GAUGE_METRIC_INCORRECT_FIELD_FILTER,
+ metricId));
+}
+
+TEST_F(MetricsManagerUtilTest, TestGaugeMetricTriggerNoPullAtom) {
+ StatsdConfig config;
+ int64_t metricId = 1;
+ GaugeMetric* metric = config.add_gauge_metric();
+ metric->set_id(metricId);
+ metric->set_what(StringToId("ScreenTurnedOn"));
+ metric->mutable_gauge_fields_filter()->set_include_all(true);
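+ // trigger_event is only supported when the what matcher refers to a pulled atom; ScreenTurnedOn is pushed.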
+ metric->set_trigger_event(1);
+
+ *config.add_atom_matcher() = CreateScreenTurnedOnAtomMatcher();
+
+ EXPECT_EQ(
+ initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_GAUGE_METRIC_TRIGGER_NO_PULL_ATOM, metricId));
+}
+
+TEST_F(MetricsManagerUtilTest, TestGaugeMetricTriggerNoFirstNSamples) {
+ StatsdConfig config;
+ int64_t metricId = 1;
+ GaugeMetric* metric = config.add_gauge_metric();
+ metric->set_id(metricId);
+ metric->set_what(StringToId("Matcher"));
+ *config.add_atom_matcher() =
+ CreateSimpleAtomMatcher(/*name=*/"Matcher", /*atomId=*/util::SUBSYSTEM_SLEEP_STATE);
+
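+ // trigger_event requires FIRST_N_SAMPLES sampling, which this metric does not set.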
+ metric->mutable_gauge_fields_filter()->set_include_all(true);
+ metric->set_trigger_event(StringToId("Matcher"));
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_GAUGE_METRIC_TRIGGER_NO_FIRST_N_SAMPLES,
+ metricId));
+}
+
+TEST_F(MetricsManagerUtilTest, TestMatcherDuplicate) {
+ StatsdConfig config;
+
+ *config.add_atom_matcher() = CreateScreenTurnedOnAtomMatcher();
+ *config.add_atom_matcher() = CreateScreenTurnedOnAtomMatcher();
+
+ EXPECT_EQ(initConfig(config),
+ createInvalidConfigReasonWithMatcher(INVALID_CONFIG_REASON_MATCHER_DUPLICATE,
+ StringToId("ScreenTurnedOn")));
+}
+
+TEST_F(MetricsManagerUtilTest, TestMatcherNoOperation) {
+ StatsdConfig config;
+ int64_t matcherId = 1;
+
+ AtomMatcher* matcher = config.add_atom_matcher();
+ matcher->set_id(matcherId);
+ matcher->mutable_combination()->add_matcher(StringToId("ScreenTurnedOn"));
+
+ EXPECT_EQ(initConfig(config), createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_MATCHER_NO_OPERATION, matcherId));
+}
+
+TEST_F(MetricsManagerUtilTest, TestMatcherNotOperationIsNotUnary) {
+ StatsdConfig config;
+ int64_t matcherId = 1;
+ *config.add_atom_matcher() = CreateScreenTurnedOnAtomMatcher();
+ *config.add_atom_matcher() = CreateScreenTurnedOffAtomMatcher();
+
+ AtomMatcher* matcher = config.add_atom_matcher();
+ matcher->set_id(matcherId);
+ matcher->mutable_combination()->set_operation(LogicalOperation::NOT);
+ matcher->mutable_combination()->add_matcher(StringToId("ScreenTurnedOn"));
+ matcher->mutable_combination()->add_matcher(StringToId("ScreenTurnedOff"));
+
+ EXPECT_EQ(initConfig(config),
+ createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_MATCHER_NOT_OPERATION_IS_NOT_UNARY, matcherId));
+}
+
+TEST_F(MetricsManagerUtilTest, TestConditionChildNotFound) {
+ StatsdConfig config;
+ int64_t conditionId = 1;
+ int64_t childConditionId = 2;
+
+ Predicate* condition = config.add_predicate();
+ condition->set_id(conditionId);
+ condition->mutable_combination()->set_operation(LogicalOperation::NOT);
+ condition->mutable_combination()->add_predicate(childConditionId);
+
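+ // The expected reason carries both the parent predicate id and the missing child id.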
+ optional<InvalidConfigReason> expectedInvalidConfigReason =
+ createInvalidConfigReasonWithPredicate(INVALID_CONFIG_REASON_CONDITION_CHILD_NOT_FOUND,
+ conditionId);
+ expectedInvalidConfigReason->conditionIds.push_back(childConditionId);
+ EXPECT_EQ(initConfig(config), expectedInvalidConfigReason);
+}
+
+TEST_F(MetricsManagerUtilTest, TestConditionDuplicate) {
+ StatsdConfig config;
+ *config.add_predicate() = CreateScreenIsOnPredicate();
+ *config.add_predicate() = CreateScreenIsOnPredicate();
+
+ EXPECT_EQ(initConfig(config),
+ createInvalidConfigReasonWithPredicate(INVALID_CONFIG_REASON_CONDITION_DUPLICATE,
+ StringToId("ScreenIsOn")));
+}
+
+TEST_F(MetricsManagerUtilTest, TestConditionNoOperation) {
+ StatsdConfig config;
+ int64_t conditionId = 1;
+ *config.add_predicate() = CreateScreenIsOnPredicate();
+
+ Predicate* condition = config.add_predicate();
+ condition->set_id(conditionId);
+ condition->mutable_combination()->add_predicate(StringToId("ScreenIsOn"));
+
+ EXPECT_EQ(initConfig(config),
+ createInvalidConfigReasonWithPredicate(INVALID_CONFIG_REASON_CONDITION_NO_OPERATION,
+ conditionId));
+}
+
+TEST_F(MetricsManagerUtilTest, TestConditionNotOperationIsNotUnary) {
+ StatsdConfig config;
+ int64_t conditionId = 1;
+ *config.add_predicate() = CreateScreenIsOnPredicate();
+ *config.add_predicate() = CreateScreenIsOffPredicate();
+
+ Predicate* condition = config.add_predicate();
+ condition->set_id(conditionId);
+ condition->mutable_combination()->set_operation(LogicalOperation::NOT);
+ condition->mutable_combination()->add_predicate(StringToId("ScreenIsOn"));
+ condition->mutable_combination()->add_predicate(StringToId("ScreenIsOff"));
+
+ EXPECT_EQ(initConfig(config),
+ createInvalidConfigReasonWithPredicate(
+ INVALID_CONFIG_REASON_CONDITION_NOT_OPERATION_IS_NOT_UNARY, conditionId));
+}
+
+TEST_F(MetricsManagerUtilTest, TestSubscriptionRuleNotFoundAlert) {
+ StatsdConfig config;
+ int64_t alertId = 1;
+ *config.add_subscription() = createSubscription("Subscription", Subscription::ALERT, alertId);
+
+ EXPECT_EQ(initConfig(config), createInvalidConfigReasonWithSubscriptionAndAlert(
+ INVALID_CONFIG_REASON_SUBSCRIPTION_RULE_NOT_FOUND,
+ StringToId("Subscription"), alertId));
+}
+
+TEST_F(MetricsManagerUtilTest, TestSubscriptionRuleNotFoundAlarm) {
+ StatsdConfig config;
+ int64_t alarmId = 1;
+ *config.add_subscription() = createSubscription("Subscription", Subscription::ALARM, alarmId);
+
+ EXPECT_EQ(initConfig(config), createInvalidConfigReasonWithSubscriptionAndAlarm(
+ INVALID_CONFIG_REASON_SUBSCRIPTION_RULE_NOT_FOUND,
+ StringToId("Subscription"), alarmId));
+}
+
+TEST_F(MetricsManagerUtilTest, TestSubscriptionSubscriberInfoMissing) {
+ StatsdConfig config;
+ Subscription subscription =
+ createSubscription("Subscription", Subscription::ALERT, /*alert id=*/1);
+ subscription.clear_subscriber_information();
+ *config.add_subscription() = subscription;
+
+ EXPECT_EQ(initConfig(config),
+ createInvalidConfigReasonWithSubscription(
+ INVALID_CONFIG_REASON_SUBSCRIPTION_SUBSCRIBER_INFO_MISSING,
+ StringToId("Subscription")));
+}
+
+TEST_F(MetricsManagerUtilTest, TestAlarmPeriodLessThanOrEqualZero) {
+ StatsdConfig config;
+ *config.add_alarm() = createAlarm("Alarm", /*offset=*/1, /*period=*/-1);
+
+ EXPECT_EQ(initConfig(config),
+ createInvalidConfigReasonWithAlarm(
+ INVALID_CONFIG_REASON_ALARM_PERIOD_LESS_THAN_OR_EQUAL_ZERO,
+ StringToId("Alarm")));
+}
+
+TEST_F(MetricsManagerUtilTest, TestAlarmOffsetLessThanOrEqualZero) {
+ StatsdConfig config;
+ *config.add_alarm() = createAlarm("Alarm", /*offset=*/-1, /*period=*/1);
+
+ EXPECT_EQ(initConfig(config),
+ createInvalidConfigReasonWithAlarm(
+ INVALID_CONFIG_REASON_ALARM_OFFSET_LESS_THAN_OR_EQUAL_ZERO,
+ StringToId("Alarm")));
+}
+
+TEST_F(MetricsManagerUtilTest, TestCreateAtomMatchingTrackerInvalidMatcher) {
sp<UidMap> uidMap = new UidMap();
AtomMatcher matcher;
// Matcher has no contents_case (simple/combination), so it is invalid.
matcher.set_id(21);
- EXPECT_EQ(createAtomMatchingTracker(matcher, 0, uidMap), nullptr);
+ optional<InvalidConfigReason> invalidConfigReason;
+ EXPECT_EQ(createAtomMatchingTracker(matcher, 0, uidMap, invalidConfigReason), nullptr);
+ EXPECT_EQ(invalidConfigReason,
+ createInvalidConfigReasonWithMatcher(
+ INVALID_CONFIG_REASON_MATCHER_MALFORMED_CONTENTS_CASE, matcher.id()));
}
-TEST(MetricsManagerTest, TestCreateAtomMatchingTrackerSimple) {
+TEST_F(MetricsManagerUtilTest, TestCreateAtomMatchingTrackerSimple) {
int index = 1;
int64_t id = 123;
sp<UidMap> uidMap = new UidMap();
AtomMatcher matcher;
matcher.set_id(id);
SimpleAtomMatcher* simpleAtomMatcher = matcher.mutable_simple_atom_matcher();
- simpleAtomMatcher->set_atom_id(util::SCREEN_STATE_CHANGED);
+ simpleAtomMatcher->set_atom_id(SCREEN_STATE_ATOM_ID);
simpleAtomMatcher->add_field_value_matcher()->set_field(
1 /*SCREEN_STATE_CHANGE__DISPLAY_STATE*/);
simpleAtomMatcher->mutable_field_value_matcher(0)->set_eq_int(
android::view::DisplayStateEnum::DISPLAY_STATE_ON);
- sp<AtomMatchingTracker> tracker = createAtomMatchingTracker(matcher, index, uidMap);
+ optional<InvalidConfigReason> invalidConfigReason;
+ sp<AtomMatchingTracker> tracker =
+ createAtomMatchingTracker(matcher, index, uidMap, invalidConfigReason);
EXPECT_NE(tracker, nullptr);
+ EXPECT_EQ(invalidConfigReason, nullopt);
EXPECT_TRUE(tracker->mInitialized);
EXPECT_EQ(tracker->getId(), id);
EXPECT_EQ(tracker->mIndex, index);
const set<int>& atomIds = tracker->getAtomIds();
ASSERT_EQ(atomIds.size(), 1);
- EXPECT_EQ(atomIds.count(util::SCREEN_STATE_CHANGED), 1);
+ EXPECT_EQ(atomIds.count(SCREEN_STATE_ATOM_ID), 1);
}
-TEST(MetricsManagerTest, TestCreateAtomMatchingTrackerCombination) {
+TEST_F(MetricsManagerUtilTest, TestCreateAtomMatchingTrackerCombination) {
int index = 1;
int64_t id = 123;
sp<UidMap> uidMap = new UidMap();
@@ -724,8 +1207,11 @@
combination->add_matcher(123);
combination->add_matcher(223);
- sp<AtomMatchingTracker> tracker = createAtomMatchingTracker(matcher, index, uidMap);
+ optional<InvalidConfigReason> invalidConfigReason;
+ sp<AtomMatchingTracker> tracker =
+ createAtomMatchingTracker(matcher, index, uidMap, invalidConfigReason);
EXPECT_NE(tracker, nullptr);
+ EXPECT_EQ(invalidConfigReason, nullopt);
// Combination matchers need to be initialized first.
EXPECT_FALSE(tracker->mInitialized);
@@ -735,16 +1221,21 @@
ASSERT_EQ(atomIds.size(), 0);
}
-TEST(MetricsManagerTest, TestCreateConditionTrackerInvalid) {
+TEST_F(MetricsManagerUtilTest, TestCreateConditionTrackerInvalid) {
const ConfigKey key(123, 456);
// Predicate has no contents_case (simple/combination), so it is invalid.
Predicate predicate;
predicate.set_id(21);
unordered_map<int64_t, int> atomTrackerMap;
- EXPECT_EQ(createConditionTracker(key, predicate, 0, atomTrackerMap), nullptr);
+ optional<InvalidConfigReason> invalidConfigReason;
+ EXPECT_EQ(createConditionTracker(key, predicate, 0, atomTrackerMap, invalidConfigReason),
+ nullptr);
+ EXPECT_EQ(invalidConfigReason,
+ createInvalidConfigReasonWithPredicate(
+ INVALID_CONFIG_REASON_CONDITION_MALFORMED_CONTENTS_CASE, predicate.id()));
}
-TEST(MetricsManagerTest, TestCreateConditionTrackerSimple) {
+TEST_F(MetricsManagerUtilTest, TestCreateConditionTrackerSimple) {
int index = 1;
int64_t id = 987;
const ConfigKey key(123, 456);
@@ -764,7 +1255,10 @@
atomTrackerMap[stopMatcherId] = stopMatcherIndex;
atomTrackerMap[stopAllMatcherId] = stopAllMatcherIndex;
- sp<ConditionTracker> tracker = createConditionTracker(key, predicate, index, atomTrackerMap);
+ optional<InvalidConfigReason> invalidConfigReason;
+ sp<ConditionTracker> tracker =
+ createConditionTracker(key, predicate, index, atomTrackerMap, invalidConfigReason);
+ EXPECT_EQ(invalidConfigReason, nullopt);
EXPECT_EQ(tracker->getConditionId(), id);
EXPECT_EQ(tracker->isSliced(), false);
EXPECT_TRUE(tracker->IsSimpleCondition());
@@ -775,7 +1269,7 @@
ASSERT_EQ(interestedMatchers.count(stopAllMatcherIndex), 1);
}
-TEST(MetricsManagerTest, TestCreateConditionTrackerCombination) {
+TEST_F(MetricsManagerUtilTest, TestCreateConditionTrackerCombination) {
int index = 1;
int64_t id = 987;
const ConfigKey key(123, 456);
@@ -789,12 +1283,15 @@
// Combination conditions must be initialized to set most state.
unordered_map<int64_t, int> atomTrackerMap;
- sp<ConditionTracker> tracker = createConditionTracker(key, predicate, index, atomTrackerMap);
+ optional<InvalidConfigReason> invalidConfigReason;
+ sp<ConditionTracker> tracker =
+ createConditionTracker(key, predicate, index, atomTrackerMap, invalidConfigReason);
+ EXPECT_EQ(invalidConfigReason, nullopt);
EXPECT_EQ(tracker->getConditionId(), id);
EXPECT_FALSE(tracker->IsSimpleCondition());
}
-TEST(MetricsManagerTest, TestCreateAnomalyTrackerInvalidMetric) {
+TEST_F(MetricsManagerUtilTest, TestCreateAnomalyTrackerInvalidMetric) {
Alert alert;
alert.set_id(123);
alert.set_metric_id(1);
@@ -803,13 +1300,17 @@
sp<AlarmMonitor> anomalyAlarmMonitor;
vector<sp<MetricProducer>> metricProducers;
+ optional<InvalidConfigReason> invalidConfigReason;
// Pass in empty metric producers, causing an error.
EXPECT_EQ(createAnomalyTracker(alert, anomalyAlarmMonitor, UPDATE_NEW, /*updateTime=*/123, {},
- metricProducers),
+ metricProducers, invalidConfigReason),
nullopt);
+ EXPECT_EQ(invalidConfigReason,
+ createInvalidConfigReasonWithAlert(INVALID_CONFIG_REASON_ALERT_METRIC_NOT_FOUND,
+ alert.metric_id(), alert.id()));
}
-TEST(MetricsManagerTest, TestCreateAnomalyTrackerNoThreshold) {
+TEST_F(MetricsManagerUtilTest, TestCreateAnomalyTrackerNoThreshold) {
int64_t metricId = 1;
Alert alert;
alert.set_id(123);
@@ -823,12 +1324,16 @@
vector<sp<MetricProducer>> metricProducers({new CountMetricProducer(
kConfigKey, metric, 0, {ConditionState::kUnknown}, wizard, 0x0123456789, 0, 0)});
sp<AlarmMonitor> anomalyAlarmMonitor;
+ optional<InvalidConfigReason> invalidConfigReason;
EXPECT_EQ(createAnomalyTracker(alert, anomalyAlarmMonitor, UPDATE_NEW, /*updateTime=*/123,
- {{1, 0}}, metricProducers),
+ {{1, 0}}, metricProducers, invalidConfigReason),
nullopt);
+ EXPECT_EQ(invalidConfigReason,
+ createInvalidConfigReasonWithAlert(INVALID_CONFIG_REASON_ALERT_THRESHOLD_MISSING,
+ alert.id()));
}
-TEST(MetricsManagerTest, TestCreateAnomalyTrackerMissingBuckets) {
+TEST_F(MetricsManagerUtilTest, TestCreateAnomalyTrackerMissingBuckets) {
int64_t metricId = 1;
Alert alert;
alert.set_id(123);
@@ -842,12 +1347,16 @@
vector<sp<MetricProducer>> metricProducers({new CountMetricProducer(
kConfigKey, metric, 0, {ConditionState::kUnknown}, wizard, 0x0123456789, 0, 0)});
sp<AlarmMonitor> anomalyAlarmMonitor;
+ optional<InvalidConfigReason> invalidConfigReason;
EXPECT_EQ(createAnomalyTracker(alert, anomalyAlarmMonitor, UPDATE_NEW, /*updateTime=*/123,
- {{1, 0}}, metricProducers),
+ {{1, 0}}, metricProducers, invalidConfigReason),
nullopt);
+ EXPECT_EQ(invalidConfigReason,
+ createInvalidConfigReasonWithAlert(
+ INVALID_CONFIG_REASON_ALERT_INVALID_TRIGGER_OR_NUM_BUCKETS, alert.id()));
}
-TEST(MetricsManagerTest, TestCreateAnomalyTrackerGood) {
+TEST_F(MetricsManagerUtilTest, TestCreateAnomalyTrackerGood) {
int64_t metricId = 1;
Alert alert;
alert.set_id(123);
@@ -862,12 +1371,14 @@
vector<sp<MetricProducer>> metricProducers({new CountMetricProducer(
kConfigKey, metric, 0, {ConditionState::kUnknown}, wizard, 0x0123456789, 0, 0)});
sp<AlarmMonitor> anomalyAlarmMonitor;
+ optional<InvalidConfigReason> invalidConfigReason;
EXPECT_NE(createAnomalyTracker(alert, anomalyAlarmMonitor, UPDATE_NEW, /*updateTime=*/123,
- {{1, 0}}, metricProducers),
+ {{1, 0}}, metricProducers, invalidConfigReason),
nullopt);
+ EXPECT_EQ(invalidConfigReason, nullopt);
}
-TEST(MetricsManagerTest, TestCreateAnomalyTrackerDurationTooLong) {
+TEST_F(MetricsManagerUtilTest, TestCreateAnomalyTrackerDurationTooLong) {
int64_t metricId = 1;
Alert alert;
alert.set_id(123);
@@ -887,12 +1398,16 @@
1 /* start index */, 2 /* stop index */, 3 /* stop_all index */, false /*nesting*/,
wizard, 0x0123456789, dimensions, 0, 0)});
sp<AlarmMonitor> anomalyAlarmMonitor;
+ optional<InvalidConfigReason> invalidConfigReason;
EXPECT_EQ(createAnomalyTracker(alert, anomalyAlarmMonitor, UPDATE_NEW, /*updateTime=*/123,
- {{1, 0}}, metricProducers),
+ {{1, 0}}, metricProducers, invalidConfigReason),
nullopt);
+ EXPECT_EQ(invalidConfigReason,
+ createInvalidConfigReasonWithAlert(INVALID_CONFIG_REASON_ALERT_CANNOT_ADD_ANOMALY,
+ alert.metric_id(), alert.id()));
}
-TEST(MetricsManagerTest, TestCreateDurationProducerDimensionsInWhatInvalid) {
+TEST_F(MetricsManagerUtilTest, TestCreateDurationProducerDimensionsInWhatInvalid) {
StatsdConfig config;
config.add_allowed_log_source("AID_ROOT");
*config.add_atom_matcher() = CreateAcquireWakelockAtomMatcher();
@@ -929,6 +1444,371 @@
EXPECT_FALSE(metricsManager->isConfigValid());
}
+TEST_F(MetricsManagerUtilTest, TestSampledMetrics) {
+ StatsdConfig config;
+ config.add_allowed_log_source("AID_ROOT");
+
+ AtomMatcher appCrashMatcher =
+ CreateSimpleAtomMatcher("APP_CRASH_OCCURRED", util::APP_CRASH_OCCURRED);
+ *config.add_atom_matcher() = appCrashMatcher;
+
+ *config.add_atom_matcher() = CreateAcquireWakelockAtomMatcher();
+ *config.add_atom_matcher() = CreateReleaseWakelockAtomMatcher();
+
+ AtomMatcher bleScanResultReceivedMatcher = CreateSimpleAtomMatcher(
+ "BleScanResultReceivedAtomMatcher", util::BLE_SCAN_RESULT_RECEIVED);
+ *config.add_atom_matcher() = bleScanResultReceivedMatcher;
+
+ Predicate holdingWakelockPredicate = CreateHoldingWakelockPredicate();
+ *holdingWakelockPredicate.mutable_simple_predicate()->mutable_dimensions() =
+ CreateAttributionUidDimensions(util::WAKELOCK_STATE_CHANGED, {Position::FIRST});
+ *config.add_predicate() = holdingWakelockPredicate;
+
+ CountMetric sampledCountMetric =
+ createCountMetric("CountSampledAppCrashesPerUid", appCrashMatcher.id(), nullopt, {});
+ *sampledCountMetric.mutable_dimensions_in_what() =
+ CreateDimensions(util::APP_CRASH_OCCURRED, {1 /*uid*/});
+ *sampledCountMetric.mutable_dimensional_sampling_info()->mutable_sampled_what_field() =
+ CreateDimensions(util::APP_CRASH_OCCURRED, {1 /*uid*/});
+ sampledCountMetric.mutable_dimensional_sampling_info()->set_shard_count(2);
+ *config.add_count_metric() = sampledCountMetric;
+
+ CountMetric unsampledCountMetric =
+ createCountMetric("CountAppCrashesPerUid", appCrashMatcher.id(), nullopt, {});
+ *unsampledCountMetric.mutable_dimensions_in_what() =
+ CreateDimensions(util::APP_CRASH_OCCURRED, {1 /*uid*/});
+ *config.add_count_metric() = unsampledCountMetric;
+
+ DurationMetric sampledDurationMetric = createDurationMetric(
+ "DurationSampledWakelockPerUid", holdingWakelockPredicate.id(), nullopt, {});
+ *sampledDurationMetric.mutable_dimensions_in_what() =
+ CreateAttributionUidDimensions(util::WAKELOCK_STATE_CHANGED, {Position::FIRST});
+ *sampledDurationMetric.mutable_dimensional_sampling_info()->mutable_sampled_what_field() =
+ CreateAttributionUidDimensions(util::WAKELOCK_STATE_CHANGED, {Position::FIRST});
+ sampledDurationMetric.mutable_dimensional_sampling_info()->set_shard_count(4);
+ *config.add_duration_metric() = sampledDurationMetric;
+
+ DurationMetric unsampledDurationMetric = createDurationMetric(
+ "DurationWakelockPerUid", holdingWakelockPredicate.id(), nullopt, {});
+ unsampledDurationMetric.set_aggregation_type(DurationMetric::SUM);
+ *unsampledDurationMetric.mutable_dimensions_in_what() =
+ CreateAttributionUidDimensions(util::WAKELOCK_STATE_CHANGED, {Position::FIRST});
+ *config.add_duration_metric() = unsampledDurationMetric;
+
+ ValueMetric sampledValueMetric =
+ createValueMetric("ValueSampledBleScanResultsPerUid", bleScanResultReceivedMatcher,
+ /*num_results=*/2, nullopt, {});
+ *sampledValueMetric.mutable_dimensions_in_what() =
+ CreateDimensions(util::BLE_SCAN_RESULT_RECEIVED, {1 /* uid */});
+ *sampledValueMetric.mutable_dimensional_sampling_info()->mutable_sampled_what_field() =
+ CreateDimensions(util::BLE_SCAN_RESULT_RECEIVED, {1 /*uid*/});
+ sampledValueMetric.mutable_dimensional_sampling_info()->set_shard_count(6);
+ *config.add_value_metric() = sampledValueMetric;
+
+ ValueMetric unsampledValueMetric =
+ createValueMetric("ValueBleScanResultsPerUid", bleScanResultReceivedMatcher,
+ /*num_results=*/2, nullopt, {});
+ *unsampledValueMetric.mutable_dimensions_in_what() =
+ CreateDimensions(util::BLE_SCAN_RESULT_RECEIVED, {1 /* uid */});
+ *config.add_value_metric() = unsampledValueMetric;
+
+ KllMetric sampledKllMetric =
+ createKllMetric("KllSampledBleScanResultsPerUid", bleScanResultReceivedMatcher,
+ /*num_results=*/2, nullopt);
+ *sampledKllMetric.mutable_dimensions_in_what() =
+ CreateDimensions(util::BLE_SCAN_RESULT_RECEIVED, {1 /* uid */});
+ *sampledKllMetric.mutable_dimensional_sampling_info()->mutable_sampled_what_field() =
+ CreateDimensions(util::BLE_SCAN_RESULT_RECEIVED, {1 /*uid*/});
+ sampledKllMetric.mutable_dimensional_sampling_info()->set_shard_count(8);
+ *config.add_kll_metric() = sampledKllMetric;
+
+ KllMetric unsampledKllMetric = createKllMetric(
+ "KllBleScanResultsPerUid", bleScanResultReceivedMatcher, /*num_results=*/2, nullopt);
+ *unsampledKllMetric.mutable_dimensions_in_what() =
+ CreateDimensions(util::BLE_SCAN_RESULT_RECEIVED, {1 /* uid */});
+ *config.add_kll_metric() = unsampledKllMetric;
+
+ GaugeMetric sampledGaugeMetric =
+ createGaugeMetric("GaugeSampledAppCrashesPerUid", appCrashMatcher.id(),
+ GaugeMetric::FIRST_N_SAMPLES, nullopt, nullopt);
+ *sampledGaugeMetric.mutable_dimensions_in_what() =
+ CreateDimensions(util::APP_CRASH_OCCURRED, {1 /* uid */});
+ *sampledGaugeMetric.mutable_dimensional_sampling_info()->mutable_sampled_what_field() =
+ CreateDimensions(util::APP_CRASH_OCCURRED, {1 /*uid*/});
+ sampledGaugeMetric.mutable_dimensional_sampling_info()->set_shard_count(10);
+ *config.add_gauge_metric() = sampledGaugeMetric;
+
+ GaugeMetric unsampledGaugeMetric =
+ createGaugeMetric("GaugeAppCrashesPerUid", appCrashMatcher.id(),
+ GaugeMetric::FIRST_N_SAMPLES, nullopt, nullopt);
+ *unsampledGaugeMetric.mutable_dimensions_in_what() =
+ CreateDimensions(util::APP_CRASH_OCCURRED, {1 /* uid */});
+ *config.add_gauge_metric() = unsampledGaugeMetric;
+
+ ConfigKey key(123, 987);
+ uint64_t timeNs = 456;
+ sp<StatsPullerManager> pullerManager = new StatsPullerManager();
+ sp<AlarmMonitor> anomalyAlarmMonitor;
+ sp<AlarmMonitor> periodicAlarmMonitor;
+ sp<UidMap> uidMap;
+ sp<MetricsManager> metricsManager =
+ new MetricsManager(key, config, timeNs, timeNs, uidMap, pullerManager,
+ anomalyAlarmMonitor, periodicAlarmMonitor);
+ ASSERT_TRUE(metricsManager->isConfigValid());
+ ASSERT_EQ(10, metricsManager->mAllMetricProducers.size());
+
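+ // Producers appear in the order the metrics were added to the config above.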
+ sp<MetricProducer> sampledCountMetricProducer = metricsManager->mAllMetricProducers[0];
+ sp<MetricProducer> unsampledCountMetricProducer = metricsManager->mAllMetricProducers[1];
+ sp<MetricProducer> sampledDurationMetricProducer = metricsManager->mAllMetricProducers[2];
+ sp<MetricProducer> unsampledDurationMetricProducer = metricsManager->mAllMetricProducers[3];
+ sp<MetricProducer> sampledValueMetricProducer = metricsManager->mAllMetricProducers[4];
+ sp<MetricProducer> unsampledValueMetricProducer = metricsManager->mAllMetricProducers[5];
+ sp<MetricProducer> sampledKllMetricProducer = metricsManager->mAllMetricProducers[6];
+ sp<MetricProducer> unsampledKllMetricProducer = metricsManager->mAllMetricProducers[7];
+ sp<MetricProducer> sampledGaugeMetricProducer = metricsManager->mAllMetricProducers[8];
+ sp<MetricProducer> unsampledGaugeMetricProducer = metricsManager->mAllMetricProducers[9];
+
+ // Check that the shard count is set correctly for sampled metrics and left at the default otherwise.
+ EXPECT_EQ(2, sampledCountMetricProducer->mShardCount);
+ EXPECT_EQ(0, unsampledCountMetricProducer->mShardCount);
+ EXPECT_EQ(4, sampledDurationMetricProducer->mShardCount);
+ EXPECT_EQ(0, unsampledDurationMetricProducer->mShardCount);
+ EXPECT_EQ(6, sampledValueMetricProducer->mShardCount);
+ EXPECT_EQ(0, unsampledValueMetricProducer->mShardCount);
+ EXPECT_EQ(8, sampledKllMetricProducer->mShardCount);
+ EXPECT_EQ(0, unsampledKllMetricProducer->mShardCount);
+ EXPECT_EQ(10, sampledGaugeMetricProducer->mShardCount);
+ EXPECT_EQ(0, unsampledGaugeMetricProducer->mShardCount);
+
+ // Check that the sampled what fields are set correctly or left empty.
+ EXPECT_EQ(1, sampledCountMetricProducer->mSampledWhatFields.size());
+ EXPECT_EQ(true, unsampledCountMetricProducer->mSampledWhatFields.empty());
+ EXPECT_EQ(1, sampledDurationMetricProducer->mSampledWhatFields.size());
+ EXPECT_EQ(true, unsampledDurationMetricProducer->mSampledWhatFields.empty());
+ EXPECT_EQ(1, sampledValueMetricProducer->mSampledWhatFields.size());
+ EXPECT_EQ(true, unsampledValueMetricProducer->mSampledWhatFields.empty());
+ EXPECT_EQ(1, sampledKllMetricProducer->mSampledWhatFields.size());
+ EXPECT_EQ(true, unsampledKllMetricProducer->mSampledWhatFields.empty());
+ EXPECT_EQ(1, sampledGaugeMetricProducer->mSampledWhatFields.size());
+ EXPECT_EQ(true, unsampledGaugeMetricProducer->mSampledWhatFields.empty());
+}
+
+TEST_F(MetricsManagerUtilTest, TestMetricHasShardCountButNoSampledField) {
+ AtomMatcher appCrashMatcher =
+ CreateSimpleAtomMatcher("APP_CRASH_OCCURRED", util::APP_CRASH_OCCURRED);
+
+ StatsdConfig config;
+ config.add_allowed_log_source("AID_ROOT");
+ *config.add_atom_matcher() = appCrashMatcher;
+
+ CountMetric metric =
+ createCountMetric("CountSampledAppCrashesPerUid", appCrashMatcher.id(), nullopt, {});
+ *metric.mutable_dimensions_in_what() = CreateDimensions(util::APP_CRASH_OCCURRED, {1 /*uid*/});
+ metric.mutable_dimensional_sampling_info()->set_shard_count(2);
+ *config.add_count_metric() = metric;
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(
+ INVALID_CONFIG_REASON_METRIC_DIMENSIONAL_SAMPLING_INFO_MISSING_SAMPLED_FIELD,
+ metric.id()));
+}
+
+TEST_F(MetricsManagerUtilTest, TestMetricHasSampledFieldIncorrectShardCount) {
+ AtomMatcher appCrashMatcher =
+ CreateSimpleAtomMatcher("APP_CRASH_OCCURRED", util::APP_CRASH_OCCURRED);
+
+ StatsdConfig config;
+ config.add_allowed_log_source("AID_ROOT");
+ *config.add_atom_matcher() = appCrashMatcher;
+
+ CountMetric metric =
+ createCountMetric("CountSampledAppCrashesPerUid", appCrashMatcher.id(), nullopt, {});
+ *metric.mutable_dimensions_in_what() = CreateDimensions(util::APP_CRASH_OCCURRED, {1 /*uid*/});
+ *metric.mutable_dimensional_sampling_info()->mutable_sampled_what_field() =
+ CreateDimensions(util::APP_CRASH_OCCURRED, {1 /*uid*/});
+ *config.add_count_metric() = metric;
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(
+ INVALID_CONFIG_REASON_METRIC_DIMENSIONAL_SAMPLING_INFO_INCORRECT_SHARD_COUNT,
+ metric.id()));
+}
+
+TEST_F(MetricsManagerUtilTest, TestMetricHasMultipleSampledFields) {
+ AtomMatcher appCrashMatcher =
+ CreateSimpleAtomMatcher("APP_CRASH_OCCURRED", util::APP_CRASH_OCCURRED);
+
+ StatsdConfig config;
+ config.add_allowed_log_source("AID_ROOT");
+ *config.add_atom_matcher() = appCrashMatcher;
+
+ CountMetric metric =
+ createCountMetric("CountSampledAppCrashesPerUid", appCrashMatcher.id(), nullopt, {});
+ *metric.mutable_dimensions_in_what() = CreateDimensions(util::APP_CRASH_OCCURRED, {1 /*uid*/});
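+ // The sampled what field specifies two fields; only a single sampled field is allowed.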
+ *metric.mutable_dimensional_sampling_info()->mutable_sampled_what_field() =
+ CreateDimensions(util::APP_CRASH_OCCURRED, {1 /*uid*/, 2 /*event_type*/});
+ metric.mutable_dimensional_sampling_info()->set_shard_count(2);
+ *config.add_count_metric() = metric;
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_SAMPLED_FIELD_INCORRECT_SIZE,
+ metric.id()));
+}
+
+TEST_F(MetricsManagerUtilTest, TestMetricHasRepeatedSampledField_PositionALL) {
+ AtomMatcher testAtomReportedMatcher =
+ CreateSimpleAtomMatcher("TEST_ATOM_REPORTED", util::TEST_ATOM_REPORTED);
+
+ StatsdConfig config;
+ config.add_allowed_log_source("AID_ROOT");
+ *config.add_atom_matcher() = testAtomReportedMatcher;
+
+ CountMetric metric = createCountMetric("CountSampledTestAtomReportedPerRepeatedIntField",
+ testAtomReportedMatcher.id(), nullopt, {});
+ *metric.mutable_dimensions_in_what() = CreateRepeatedDimensions(
+ util::TEST_ATOM_REPORTED, {9 /*repeated_int_field*/}, {Position::ALL});
+ *metric.mutable_dimensional_sampling_info()->mutable_sampled_what_field() =
+ CreateRepeatedDimensions(util::TEST_ATOM_REPORTED, {9 /*repeated_int_field*/},
+ {Position::ALL});
+ metric.mutable_dimensional_sampling_info()->set_shard_count(2);
+ *config.add_count_metric() = metric;
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_SAMPLED_FIELD_INCORRECT_SIZE,
+ metric.id()));
+}
+
+TEST_F(MetricsManagerUtilTest, TestMetricHasRepeatedSampledField_PositionFIRST) {
+ AtomMatcher testAtomReportedMatcher =
+ CreateSimpleAtomMatcher("TEST_ATOM_REPORTED", util::TEST_ATOM_REPORTED);
+
+ StatsdConfig config;
+ config.add_allowed_log_source("AID_ROOT");
+ *config.add_atom_matcher() = testAtomReportedMatcher;
+
+ CountMetric metric = createCountMetric("CountSampledTestAtomReportedPerRepeatedIntField",
+ testAtomReportedMatcher.id(), nullopt, {});
+ *metric.mutable_dimensions_in_what() = CreateRepeatedDimensions(
+ util::TEST_ATOM_REPORTED, {9 /*repeated_int_field*/}, {Position::FIRST});
+ *metric.mutable_dimensional_sampling_info()->mutable_sampled_what_field() =
+ CreateRepeatedDimensions(util::TEST_ATOM_REPORTED, {9 /*repeated_int_field*/},
+ {Position::FIRST});
+ metric.mutable_dimensional_sampling_info()->set_shard_count(2);
+ *config.add_count_metric() = metric;
+
+ EXPECT_EQ(initConfig(config), nullopt);
+}
+
+TEST_F(MetricsManagerUtilTest, TestMetricHasRepeatedSampledField_PositionLAST) {
+ AtomMatcher testAtomReportedMatcher =
+ CreateSimpleAtomMatcher("TEST_ATOM_REPORTED", util::TEST_ATOM_REPORTED);
+
+ StatsdConfig config;
+ config.add_allowed_log_source("AID_ROOT");
+ *config.add_atom_matcher() = testAtomReportedMatcher;
+
+ CountMetric metric = createCountMetric("CountSampledTestAtomReportedPerRepeatedIntField",
+ testAtomReportedMatcher.id(), nullopt, {});
+ *metric.mutable_dimensions_in_what() = CreateRepeatedDimensions(
+ util::TEST_ATOM_REPORTED, {9 /*repeated_int_field*/}, {Position::LAST});
+ *metric.mutable_dimensional_sampling_info()->mutable_sampled_what_field() =
+ CreateRepeatedDimensions(util::TEST_ATOM_REPORTED, {9 /*repeated_int_field*/},
+ {Position::LAST});
+ metric.mutable_dimensional_sampling_info()->set_shard_count(2);
+ *config.add_count_metric() = metric;
+
+ EXPECT_EQ(initConfig(config), nullopt);
+}
+
+TEST_F(MetricsManagerUtilTest, TestMetricHasRepeatedSampledField_PositionANY) {
+ AtomMatcher testAtomReportedMatcher =
+ CreateSimpleAtomMatcher("TEST_ATOM_REPORTED", util::TEST_ATOM_REPORTED);
+
+ StatsdConfig config;
+ config.add_allowed_log_source("AID_ROOT");
+ *config.add_atom_matcher() = testAtomReportedMatcher;
+
+ CountMetric metric = createCountMetric("CountSampledTestAtomReportedPerRepeatedIntField",
+ testAtomReportedMatcher.id(), nullopt, {});
+ *metric.mutable_dimensions_in_what() = CreateRepeatedDimensions(
+ util::TEST_ATOM_REPORTED, {9 /*repeated_int_field*/}, {Position::ANY});
+ *metric.mutable_dimensional_sampling_info()->mutable_sampled_what_field() =
+ CreateRepeatedDimensions(util::TEST_ATOM_REPORTED, {9 /*repeated_int_field*/},
+ {Position::ALL});
+ metric.mutable_dimensional_sampling_info()->set_shard_count(2);
+ *config.add_count_metric() = metric;
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_SAMPLED_FIELD_INCORRECT_SIZE,
+ metric.id()));
+}
+
+TEST_F(MetricsManagerUtilTest, TestMetricSampledFieldNotSubsetDimension) {
+ AtomMatcher appCrashMatcher =
+ CreateSimpleAtomMatcher("APP_CRASH_OCCURRED", util::APP_CRASH_OCCURRED);
+
+ StatsdConfig config;
+ config.add_allowed_log_source("AID_ROOT");
+ *config.add_atom_matcher() = appCrashMatcher;
+
+ CountMetric metric =
+ createCountMetric("CountSampledAppCrashesPerUid", appCrashMatcher.id(), nullopt, {});
+ *metric.mutable_dimensional_sampling_info()->mutable_sampled_what_field() =
+ CreateDimensions(util::APP_CRASH_OCCURRED, {1 /*uid*/});
+ metric.mutable_dimensional_sampling_info()->set_shard_count(2);
+ *config.add_count_metric() = metric;
+
+ EXPECT_EQ(
+ initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_METRIC_SAMPLED_FIELDS_NOT_SUBSET_DIM_IN_WHAT,
+ metric.id()));
+}
+
+TEST_F(MetricsManagerUtilTest, TestCountMetricHasRestrictedDelegate) {
+ StatsdConfig config;
+ CountMetric* metric = config.add_count_metric();
+ config.set_restricted_metrics_delegate_package_name("com.android.app.test");
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_RESTRICTED_METRIC_NOT_SUPPORTED));
+}
+
+TEST_F(MetricsManagerUtilTest, TestDurationMetricHasRestrictedDelegate) {
+ StatsdConfig config;
+ DurationMetric* metric = config.add_duration_metric();
+ config.set_restricted_metrics_delegate_package_name("com.android.app.test");
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_RESTRICTED_METRIC_NOT_SUPPORTED));
+}
+
+TEST_F(MetricsManagerUtilTest, TestGaugeMetricHasRestrictedDelegate) {
+ StatsdConfig config;
+ GaugeMetric* metric = config.add_gauge_metric();
+ config.set_restricted_metrics_delegate_package_name("com.android.app.test");
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_RESTRICTED_METRIC_NOT_SUPPORTED));
+}
+
+TEST_F(MetricsManagerUtilTest, TestNumericValueMetricHasRestrictedDelegate) {
+ StatsdConfig config;
+ ValueMetric* metric = config.add_value_metric();
+ config.set_restricted_metrics_delegate_package_name("com.android.app.test");
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_RESTRICTED_METRIC_NOT_SUPPORTED));
+}
+
+TEST_F(MetricsManagerUtilTest, TestKllMetricHasRestrictedDelegate) {
+ StatsdConfig config;
+ KllMetric* metric = config.add_kll_metric();
+ config.set_restricted_metrics_delegate_package_name("com.android.app.test");
+
+ EXPECT_EQ(initConfig(config),
+ InvalidConfigReason(INVALID_CONFIG_REASON_RESTRICTED_METRIC_NOT_SUPPORTED));
+}
+
} // namespace statsd
} // namespace os
} // namespace android
diff --git a/statsd/tests/shell/ShellSubscriber_test.cpp b/statsd/tests/shell/ShellSubscriber_test.cpp
index 37f5bf3..a994966 100644
--- a/statsd/tests/shell/ShellSubscriber_test.cpp
+++ b/statsd/tests/shell/ShellSubscriber_test.cpp
@@ -14,134 +14,51 @@
#include "src/shell/ShellSubscriber.h"
+#include <aidl/android/os/StatsSubscriptionCallbackReason.h>
#include <gtest/gtest.h>
#include <stdio.h>
#include <unistd.h>
+#include <optional>
#include <vector>
+#include "frameworks/proto_logging/stats/atoms.pb.h"
+#include "gtest_matchers.h"
#include "src/shell/shell_config.pb.h"
#include "src/shell/shell_data.pb.h"
-#include "frameworks/proto_logging/stats/atoms.pb.h"
#include "stats_event.h"
+#include "statslog_statsdtest.h"
#include "tests/metrics/metrics_test_helper.h"
#include "tests/statsd_test_util.h"
-using namespace android::os::statsd;
+using ::aidl::android::os::StatsSubscriptionCallbackReason;
using android::sp;
+using android::os::statsd::TestAtomReported;
+using android::os::statsd::TrainExperimentIds;
+using android::os::statsd::util::BytesField;
+using android::os::statsd::util::CPU_ACTIVE_TIME;
+using android::os::statsd::util::PHONE_SIGNAL_STRENGTH_CHANGED;
+using android::os::statsd::util::PLUGGED_STATE_CHANGED;
+using android::os::statsd::util::SCREEN_STATE_CHANGED;
+using android::os::statsd::util::TEST_ATOM_REPORTED;
using std::vector;
using testing::_;
+using testing::A;
+using testing::ByMove;
+using testing::DoAll;
using testing::Invoke;
using testing::NaggyMock;
+using testing::Return;
+using testing::SaveArg;
+using testing::SetArgPointee;
using testing::StrictMock;
+namespace android {
+namespace os {
+namespace statsd {
+
#ifdef __ANDROID__
-void runShellTest(ShellSubscription config, sp<MockUidMap> uidMap,
- sp<MockStatsPullerManager> pullerManager,
- const vector<std::shared_ptr<LogEvent>>& pushedEvents,
- const ShellData& expectedData) {
- // set up 2 pipes for read/write config and data
- int fds_config[2];
- ASSERT_EQ(0, pipe(fds_config));
-
- int fds_data[2];
- ASSERT_EQ(0, pipe(fds_data));
-
- size_t bufferSize = config.ByteSize();
- // write the config to pipe, first write size of the config
- write(fds_config[1], &bufferSize, sizeof(bufferSize));
- // then write config itself
- vector<uint8_t> buffer(bufferSize);
- config.SerializeToArray(&buffer[0], bufferSize);
- write(fds_config[1], buffer.data(), bufferSize);
- close(fds_config[1]);
-
- sp<ShellSubscriber> shellClient = new ShellSubscriber(uidMap, pullerManager);
-
- // mimic a binder thread that a shell subscriber runs on. it would block.
- std::thread reader([&shellClient, &fds_config, &fds_data] {
- shellClient->startNewSubscription(fds_config[0], fds_data[1], /*timeoutSec=*/-1);
- });
- reader.detach();
-
- // let the shell subscriber to receive the config from pipe.
- std::this_thread::sleep_for(100ms);
-
- if (pushedEvents.size() > 0) {
- // send a log event that matches the config.
- std::thread log_reader([&shellClient, &pushedEvents] {
- for (const auto& event : pushedEvents) {
- shellClient->onLogEvent(*event);
- }
- });
-
- log_reader.detach();
-
- if (log_reader.joinable()) {
- log_reader.join();
- }
- }
-
- // wait for the data to be written.
- std::this_thread::sleep_for(100ms);
-
- // Because we might receive heartbeats from statsd, consisting of data sizes
- // of 0, encapsulate reads within a while loop.
- bool readAtom = false;
- while (!readAtom) {
- // Read the atom size.
- size_t dataSize = 0;
- read(fds_data[0], &dataSize, sizeof(dataSize));
- if (dataSize == 0) continue;
- EXPECT_EQ(expectedData.ByteSize(), int(dataSize));
-
- // Read that much data in proto binary format.
- vector<uint8_t> dataBuffer(dataSize);
- EXPECT_EQ((int)dataSize, read(fds_data[0], dataBuffer.data(), dataSize));
-
- // Make sure the received bytes can be parsed to an atom.
- ShellData receivedAtom;
- EXPECT_TRUE(receivedAtom.ParseFromArray(dataBuffer.data(), dataSize) != 0);
-
- // Serialize the expected atom to byte array and compare to make sure
- // they are the same.
- vector<uint8_t> expectedAtomBuffer(expectedData.ByteSize());
- expectedData.SerializeToArray(expectedAtomBuffer.data(), expectedData.ByteSize());
- EXPECT_EQ(expectedAtomBuffer, dataBuffer);
-
- readAtom = true;
- }
-
- close(fds_data[0]);
- if (reader.joinable()) {
- reader.join();
- }
-}
-
-TEST(ShellSubscriberTest, testPushedSubscription) {
- sp<MockUidMap> uidMap = new NaggyMock<MockUidMap>();
-
- sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
- vector<std::shared_ptr<LogEvent>> pushedList;
-
- // Create the LogEvent from an AStatsEvent
- std::unique_ptr<LogEvent> logEvent = CreateScreenStateChangedEvent(
- 1000 /*timestamp*/, ::android::view::DisplayStateEnum::DISPLAY_STATE_ON);
- pushedList.push_back(std::move(logEvent));
-
- // create a simple config to get screen events
- ShellSubscription config;
- config.add_pushed()->set_atom_id(29);
-
- // this is the expected screen event atom.
- ShellData shellData;
- shellData.add_atom()->mutable_screen_state_changed()->set_state(
- ::android::view::DisplayStateEnum::DISPLAY_STATE_ON);
-
- runShellTest(config, uidMap, pullerManager, pushedList, shellData);
-}
-
namespace {
int kUid1 = 1000;
@@ -150,31 +67,45 @@
int kCpuTime1 = 100;
int kCpuTime2 = 200;
-ShellData getExpectedShellData() {
+int64_t kCpuActiveTimeEventTimestampNs = 1111L;
+
+// Number of clients running simultaneously
+
+// Just a single client
+const int kSingleClient = 1;
+// One more client than allowed binder threads
+const int kNumClients = 11;
+
+// Utility to build the expected ShellData proto for the pulled CpuActiveTime atoms
+ShellData getExpectedPulledData() {
ShellData shellData;
auto* atom1 = shellData.add_atom()->mutable_cpu_active_time();
atom1->set_uid(kUid1);
atom1->set_time_millis(kCpuTime1);
+ shellData.add_elapsed_timestamp_nanos(kCpuActiveTimeEventTimestampNs);
auto* atom2 = shellData.add_atom()->mutable_cpu_active_time();
atom2->set_uid(kUid2);
atom2->set_time_millis(kCpuTime2);
+ shellData.add_elapsed_timestamp_nanos(kCpuActiveTimeEventTimestampNs);
return shellData;
}
+// Utility to make a pulled atom Shell Config
ShellSubscription getPulledConfig() {
ShellSubscription config;
auto* pull_config = config.add_pulled();
- pull_config->mutable_matcher()->set_atom_id(10016);
+ pull_config->mutable_matcher()->set_atom_id(CPU_ACTIVE_TIME);
pull_config->set_freq_millis(2000);
return config;
}
+// Utility to create a CpuActiveTime pulled atom LogEvent with the given uid and time
shared_ptr<LogEvent> makeCpuActiveTimeAtom(int32_t uid, int64_t timeMillis) {
AStatsEvent* statsEvent = AStatsEvent_obtain();
- AStatsEvent_setAtomId(statsEvent, 10016);
- AStatsEvent_overwriteTimestamp(statsEvent, 1111L);
+ AStatsEvent_setAtomId(statsEvent, CPU_ACTIVE_TIME);
+ AStatsEvent_overwriteTimestamp(statsEvent, kCpuActiveTimeEventTimestampNs);
AStatsEvent_writeInt32(statsEvent, uid);
AStatsEvent_writeInt64(statsEvent, timeMillis);
@@ -183,14 +114,547 @@
return logEvent;
}
+// Utility to create pushed atom LogEvents
+vector<std::shared_ptr<LogEvent>> getPushedEvents() {
+ vector<std::shared_ptr<LogEvent>> pushedList;
+ // Create the LogEvent from an AStatsEvent
+ std::unique_ptr<LogEvent> logEvent1 = CreateScreenStateChangedEvent(
+ 1000 /*timestamp*/, ::android::view::DisplayStateEnum::DISPLAY_STATE_ON);
+ std::unique_ptr<LogEvent> logEvent2 = CreateScreenStateChangedEvent(
+ 2000 /*timestamp*/, ::android::view::DisplayStateEnum::DISPLAY_STATE_OFF);
+ std::unique_ptr<LogEvent> logEvent3 = CreateBatteryStateChangedEvent(
+ 3000 /*timestamp*/, BatteryPluggedStateEnum::BATTERY_PLUGGED_USB);
+ std::unique_ptr<LogEvent> logEvent4 = CreateBatteryStateChangedEvent(
+ 4000 /*timestamp*/, BatteryPluggedStateEnum::BATTERY_PLUGGED_NONE);
+ pushedList.push_back(std::move(logEvent1));
+ pushedList.push_back(std::move(logEvent2));
+ pushedList.push_back(std::move(logEvent3));
+ pushedList.push_back(std::move(logEvent4));
+ return pushedList;
+}
+
+// Utility to read & return ShellData proto, skipping heartbeats.
+static ShellData readData(int fd) {
+ ssize_t dataSize = 0;
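+ // Heartbeats are written as zero-length size headers; keep reading until a non-zero size arrives.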
+ while (dataSize == 0) {
+ read(fd, &dataSize, sizeof(dataSize));
+ }
+ // Read that much data in proto binary format.
+ vector<uint8_t> dataBuffer(dataSize);
+ EXPECT_EQ((int)dataSize, read(fd, dataBuffer.data(), dataSize));
+
+ // Make sure the received bytes can be parsed to an atom.
+ ShellData receivedAtom;
+ EXPECT_TRUE(receivedAtom.ParseFromArray(dataBuffer.data(), dataSize) != 0);
+ return receivedAtom;
+}
+
+void runShellTest(ShellSubscription config, sp<MockUidMap> uidMap,
+ sp<MockStatsPullerManager> pullerManager,
+ const vector<std::shared_ptr<LogEvent>>& pushedEvents,
+ const vector<ShellData>& expectedData, int numClients) {
+ sp<ShellSubscriber> shellManager =
+ new ShellSubscriber(uidMap, pullerManager, /*LogEventFilter=*/nullptr);
+
+ size_t bufferSize = config.ByteSize();
+ vector<uint8_t> buffer(bufferSize);
+ config.SerializeToArray(&buffer[0], bufferSize);
+
+ int fds_configs[numClients][2];
+ int fds_datas[numClients][2];
+ for (int i = 0; i < numClients; i++) {
+ // set up 2 pipes for read/write config and data
+ ASSERT_EQ(0, pipe2(fds_configs[i], O_CLOEXEC));
+ ASSERT_EQ(0, pipe2(fds_datas[i], O_CLOEXEC));
+
+ // write the config to pipe, first write size of the config
+ write(fds_configs[i][1], &bufferSize, sizeof(bufferSize));
+ // then write config itself
+ write(fds_configs[i][1], buffer.data(), bufferSize);
+ close(fds_configs[i][1]);
+
+ shellManager->startNewSubscription(fds_configs[i][0], fds_datas[i][1], /*timeoutSec=*/-1);
+ close(fds_configs[i][0]);
+ close(fds_datas[i][1]);
+ }
+
+ // send a log event that matches the config.
+ for (const auto& event : pushedEvents) {
+ shellManager->onLogEvent(*event);
+ }
+
+ for (int i = 0; i < numClients; i++) {
+ vector<ShellData> actualData;
+ for (int j = 1; j <= expectedData.size(); j++) {
+ actualData.push_back(readData(fds_datas[i][0]));
+ }
+
+ EXPECT_THAT(expectedData, UnorderedPointwise(EqShellData(), actualData));
+ }
+
+ // Not closing fds_datas[i][0] because this causes writes within ShellSubscriberClient to hang
+}
+
+unique_ptr<LogEvent> createTestAtomReportedEvent(const uint64_t timestampNs,
+ const int32_t intFieldValue,
+ const vector<int64_t>& expIds) {
+ TrainExperimentIds trainExpIds;
+ *trainExpIds.mutable_experiment_id() = {expIds.begin(), expIds.end()};
+ const vector<uint8_t> trainExpIdsBytes = protoToBytes(trainExpIds);
+ return CreateTestAtomReportedEvent(
+ timestampNs, /* attributionUids */ {1001},
+ /* attributionTags */ {"app1"}, intFieldValue, /*longField */ 0LL,
+ /* floatField */ 0.0f,
+ /* stringField */ "abc", /* boolField */ false, TestAtomReported::OFF, trainExpIdsBytes,
+ /* repeatedIntField */ {}, /* repeatedLongField */ {}, /* repeatedFloatField */ {},
+ /* repeatedStringField */ {}, /* repeatedBoolField */ {},
+ /* repeatedBoolFieldLength */ 0, /* repeatedEnumField */ {});
+}
+
+TestAtomReported createTestAtomReportedProto(const int32_t intFieldValue,
+ const vector<int64_t>& expIds) {
+ TestAtomReported t;
+ auto* attributionNode = t.add_attribution_node();
+ attributionNode->set_uid(1001);
+ attributionNode->set_tag("app1");
+ t.set_int_field(intFieldValue);
+ t.set_long_field(0);
+ t.set_float_field(0.0f);
+ t.set_string_field("abc");
+ t.set_boolean_field(false);
+ t.set_state(TestAtomReported_State_OFF);
+ *t.mutable_bytes_field()->mutable_experiment_id() = {expIds.begin(), expIds.end()};
+ return t;
+}
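+
+// createTestAtomReportedProto builds the proto counterpart of the event produced by
+// createTestAtomReportedEvent above; the two are kept in sync so the EqShellData comparisons in
+// the callback tests below line up field by field.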
+
+class ShellSubscriberCallbackTest : public ::testing::Test {
+protected:
+ ShellSubscriberCallbackTest()
+ : uidMap(new NaggyMock<MockUidMap>()),
+ pullerManager(new StrictMock<MockStatsPullerManager>()),
+ mockLogEventFilter(std::make_shared<MockLogEventFilter>()),
+ shellSubscriber(uidMap, pullerManager, mockLogEventFilter),
+ callback(SharedRefBase::make<StrictMock<MockStatsSubscriptionCallback>>()),
+ reason(nullopt) {
+ }
+
+ void SetUp() override {
+ // Save callback arguments when it is invoked.
+ ON_CALL(*callback, onSubscriptionData(_, _))
+ .WillByDefault(DoAll(SaveArg<0>(&reason), SaveArg<1>(&payload),
+ Return(ByMove(Status::ok()))));
+
+ ShellSubscription config;
+ config.add_pushed()->set_atom_id(TEST_ATOM_REPORTED);
+ config.add_pushed()->set_atom_id(SCREEN_STATE_CHANGED);
+ config.add_pushed()->set_atom_id(PHONE_SIGNAL_STRENGTH_CHANGED);
+ configBytes = protoToBytes(config);
+ }
+
+ void TearDown() override {
+        // Expect setAtomIds to be called with an empty set from the shellSubscriber destructor.
+ LogEventFilter::AtomIdSet tagIds;
+ EXPECT_CALL(*mockLogEventFilter, setAtomIds(tagIds, &shellSubscriber)).Times(1);
+ }
+
+ sp<MockUidMap> uidMap;
+ sp<MockStatsPullerManager> pullerManager;
+ std::shared_ptr<MockLogEventFilter> mockLogEventFilter;
+ ShellSubscriber shellSubscriber;
+ std::shared_ptr<MockStatsSubscriptionCallback> callback;
+ vector<uint8_t> configBytes;
+
+ // Capture callback arguments.
+ std::optional<StatsSubscriptionCallbackReason> reason;
+ vector<uint8_t> payload;
+};
+
+class ShellSubscriberCallbackPulledTest : public ShellSubscriberCallbackTest {
+protected:
+ void SetUp() override {
+ ShellSubscriberCallbackTest::SetUp();
+
+ const vector<int32_t> uids{AID_SYSTEM};
+ const vector<std::shared_ptr<LogEvent>> pulledData{
+ makeCpuActiveTimeAtom(/*uid=*/kUid1, /*timeMillis=*/kCpuTime1),
+ makeCpuActiveTimeAtom(/*uid=*/kUid2, /*timeMillis=*/kCpuTime2)};
+ ON_CALL(*pullerManager, Pull(CPU_ACTIVE_TIME, uids, _, _))
+ .WillByDefault(DoAll(SetArgPointee<3>(pulledData), Return(true)));
+
+ configBytes = protoToBytes(getPulledConfig());
+
+ // Used to call pullAndSendHeartbeatsIfNeeded directly without depending on sleep.
+ shellSubscriberClient = std::move(ShellSubscriberClient::create(
+ configBytes, callback, /* startTimeSec= */ 0, uidMap, pullerManager));
+ }
+
+ unique_ptr<ShellSubscriberClient> shellSubscriberClient;
+};
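+
+// The pulled-callback tests below call pullAndSendHeartbeatsIfNeeded on shellSubscriberClient
+// with explicit nowSecs/nowMillis/nowNanos values rather than relying on the subscriber's own
+// thread, so pull and cache-timeout behavior can be exercised deterministically.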
+
+LogEventFilter::AtomIdSet CreateAtomIdSetFromShellSubscriptionBytes(const vector<uint8_t>& bytes) {
+ LogEventFilter::AtomIdSet result;
+
+ ShellSubscription config;
+ config.ParseFromArray(bytes.data(), bytes.size());
+
+ for (int i = 0; i < config.pushed_size(); i++) {
+ const auto& pushed = config.pushed(i);
+ EXPECT_TRUE(pushed.has_atom_id());
+ result.insert(pushed.atom_id());
+ }
+
+ return result;
+}
+
} // namespace
+TEST_F(ShellSubscriberCallbackTest, testAddSubscription) {
+ EXPECT_CALL(
+ *mockLogEventFilter,
+ setAtomIds(CreateAtomIdSetFromShellSubscriptionBytes(configBytes), &shellSubscriber))
+ .Times(1);
+ EXPECT_TRUE(shellSubscriber.startNewSubscription(configBytes, callback));
+}
+
+TEST_F(ShellSubscriberCallbackTest, testAddSubscriptionExceedMax) {
+ const size_t maxSubs = ShellSubscriber::getMaxSubscriptions();
+ EXPECT_CALL(
+ *mockLogEventFilter,
+ setAtomIds(CreateAtomIdSetFromShellSubscriptionBytes(configBytes), &shellSubscriber))
+ .Times(maxSubs);
+ vector<bool> results(maxSubs, false);
+    for (size_t i = 0; i < maxSubs; i++) {
+ results[i] = shellSubscriber.startNewSubscription(configBytes, callback);
+ }
+
+ // First maxSubs subscriptions should succeed.
+ EXPECT_THAT(results, Each(IsTrue()));
+
+ // Subsequent startNewSubscription should fail.
+ EXPECT_FALSE(shellSubscriber.startNewSubscription(configBytes, callback));
+}
+
+TEST_F(ShellSubscriberCallbackTest, testPushedEventsAreCached) {
+ // Expect callback to not be invoked
+ EXPECT_CALL(*callback, onSubscriptionData(_, _)).Times(Exactly(0));
+ EXPECT_CALL(
+ *mockLogEventFilter,
+ setAtomIds(CreateAtomIdSetFromShellSubscriptionBytes(configBytes), &shellSubscriber))
+ .Times(1);
+ shellSubscriber.startNewSubscription(configBytes, callback);
+
+    // Log an event that does NOT invoke the callback.
+ shellSubscriber.onLogEvent(*CreateScreenStateChangedEvent(
+ 1000 /*timestamp*/, ::android::view::DisplayStateEnum::DISPLAY_STATE_ON));
+}
+
+TEST_F(ShellSubscriberCallbackTest, testOverflowCacheIsFlushed) {
+ // Expect callback to be invoked once.
+ EXPECT_CALL(*callback, onSubscriptionData(_, _)).Times(Exactly(1));
+ EXPECT_CALL(
+ *mockLogEventFilter,
+ setAtomIds(CreateAtomIdSetFromShellSubscriptionBytes(configBytes), &shellSubscriber))
+ .Times(1);
+ shellSubscriber.startNewSubscription(configBytes, callback);
+
+ shellSubscriber.onLogEvent(*CreateScreenStateChangedEvent(
+ 1000 /*timestamp*/, ::android::view::DisplayStateEnum::DISPLAY_STATE_ON));
+
+ // Inflate size of TestAtomReported through the MODE_BYTES field.
+ const vector<int64_t> expIds = vector<int64_t>(200, INT64_MAX);
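+    // 200 varint-encoded INT64_MAX values serialize to roughly 2 KB, which is assumed to be
+    // enough to push the cached payload past the subscription's cache limit.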
+
+ // This event should trigger cache overflow flush.
+ shellSubscriber.onLogEvent(*createTestAtomReportedEvent(/*timestampNs=*/1100,
+ /*intFieldValue=*/1, expIds));
+
+ EXPECT_THAT(reason, Eq(StatsSubscriptionCallbackReason::STATSD_INITIATED));
+
+ // Get ShellData proto from the bytes payload of the callback.
+ ShellData actualShellData;
+ ASSERT_TRUE(actualShellData.ParseFromArray(payload.data(), payload.size()));
+
+ ShellData expectedShellData;
+ expectedShellData.add_atom()->mutable_screen_state_changed()->set_state(
+ ::android::view::DisplayStateEnum::DISPLAY_STATE_ON);
+ *expectedShellData.add_atom()->mutable_test_atom_reported() =
+ createTestAtomReportedProto(/* intFieldValue=*/1, expIds);
+ expectedShellData.add_elapsed_timestamp_nanos(1000);
+ expectedShellData.add_elapsed_timestamp_nanos(1100);
+
+ EXPECT_THAT(actualShellData, EqShellData(expectedShellData));
+}
+
+TEST_F(ShellSubscriberCallbackTest, testFlushTrigger) {
+ // Expect callback to be invoked once.
+ EXPECT_CALL(*callback, onSubscriptionData(_, _)).Times(Exactly(1));
+ EXPECT_CALL(
+ *mockLogEventFilter,
+ setAtomIds(CreateAtomIdSetFromShellSubscriptionBytes(configBytes), &shellSubscriber))
+ .Times(1);
+ shellSubscriber.startNewSubscription(configBytes, callback);
+
+ shellSubscriber.onLogEvent(*CreateScreenStateChangedEvent(
+ 1000 /*timestamp*/, ::android::view::DisplayStateEnum::DISPLAY_STATE_ON));
+
+ shellSubscriber.flushSubscription(callback);
+
+ EXPECT_THAT(reason, Eq(StatsSubscriptionCallbackReason::FLUSH_REQUESTED));
+
+ // Get ShellData proto from the bytes payload of the callback.
+ ShellData actualShellData;
+ ASSERT_TRUE(actualShellData.ParseFromArray(payload.data(), payload.size()));
+
+ ShellData expectedShellData;
+ expectedShellData.add_atom()->mutable_screen_state_changed()->set_state(
+ ::android::view::DisplayStateEnum::DISPLAY_STATE_ON);
+ expectedShellData.add_elapsed_timestamp_nanos(1000);
+
+ EXPECT_THAT(actualShellData, EqShellData(expectedShellData));
+}
+
+TEST_F(ShellSubscriberCallbackTest, testFlushTriggerEmptyCache) {
+ // Expect callback to be invoked once.
+ EXPECT_CALL(*callback, onSubscriptionData(_, _)).Times(Exactly(1));
+ EXPECT_CALL(
+ *mockLogEventFilter,
+ setAtomIds(CreateAtomIdSetFromShellSubscriptionBytes(configBytes), &shellSubscriber))
+ .Times(1);
+ shellSubscriber.startNewSubscription(configBytes, callback);
+
+ shellSubscriber.flushSubscription(callback);
+
+ EXPECT_THAT(reason, Eq(StatsSubscriptionCallbackReason::FLUSH_REQUESTED));
+
+ // Get ShellData proto from the bytes payload of the callback.
+ ShellData actualShellData;
+ ASSERT_TRUE(actualShellData.ParseFromArray(payload.data(), payload.size()));
+
+ ShellData expectedShellData;
+
+ EXPECT_THAT(actualShellData, EqShellData(expectedShellData));
+}
+
+TEST_F(ShellSubscriberCallbackTest, testUnsubscribe) {
+ // Expect callback to be invoked once.
+ EXPECT_CALL(*callback, onSubscriptionData(_, _)).Times(Exactly(1));
+    Expectation newSubscriptionEvent =
+            EXPECT_CALL(*mockLogEventFilter,
+                        setAtomIds(CreateAtomIdSetFromShellSubscriptionBytes(configBytes),
+                                   &shellSubscriber))
+                    .Times(1);
+    LogEventFilter::AtomIdSet idSetEmpty;
+    EXPECT_CALL(*mockLogEventFilter, setAtomIds(idSetEmpty, &shellSubscriber))
+            .Times(1)
+            .After(newSubscriptionEvent);
+
+ shellSubscriber.startNewSubscription(configBytes, callback);
+
+ shellSubscriber.onLogEvent(*CreateScreenStateChangedEvent(
+ 1000 /*timestamp*/, ::android::view::DisplayStateEnum::DISPLAY_STATE_ON));
+
+ shellSubscriber.unsubscribe(callback);
+
+ EXPECT_THAT(reason, Eq(StatsSubscriptionCallbackReason::SUBSCRIPTION_ENDED));
+
+ // Get ShellData proto from the bytes payload of the callback.
+ ShellData actualShellData;
+ ASSERT_TRUE(actualShellData.ParseFromArray(payload.data(), payload.size()));
+
+ ShellData expectedShellData;
+ expectedShellData.add_atom()->mutable_screen_state_changed()->set_state(
+ ::android::view::DisplayStateEnum::DISPLAY_STATE_ON);
+ expectedShellData.add_elapsed_timestamp_nanos(1000);
+
+ EXPECT_THAT(actualShellData, EqShellData(expectedShellData));
+
+ // This event is ignored as the subscription has ended.
+ shellSubscriber.onLogEvent(*CreateScreenStateChangedEvent(
+ 1000 /*timestamp*/, ::android::view::DisplayStateEnum::DISPLAY_STATE_ON));
+
+ // This should be a no-op as we've already unsubscribed.
+ shellSubscriber.unsubscribe(callback);
+}
+
+TEST_F(ShellSubscriberCallbackTest, testUnsubscribeEmptyCache) {
+ // Expect callback to be invoked once.
+ EXPECT_CALL(*callback, onSubscriptionData(_, _)).Times(Exactly(1));
+    Expectation newSubscriptionEvent =
+            EXPECT_CALL(*mockLogEventFilter,
+                        setAtomIds(CreateAtomIdSetFromShellSubscriptionBytes(configBytes),
+                                   &shellSubscriber))
+                    .Times(1);
+    LogEventFilter::AtomIdSet idSetEmpty;
+    EXPECT_CALL(*mockLogEventFilter, setAtomIds(idSetEmpty, &shellSubscriber))
+            .Times(1)
+            .After(newSubscriptionEvent);
+
+ shellSubscriber.startNewSubscription(configBytes, callback);
+
+ shellSubscriber.unsubscribe(callback);
+
+ EXPECT_THAT(reason, Eq(StatsSubscriptionCallbackReason::SUBSCRIPTION_ENDED));
+
+ // Get ShellData proto from the bytes payload of the callback.
+ ShellData actualShellData;
+ ASSERT_TRUE(actualShellData.ParseFromArray(payload.data(), payload.size()));
+
+ ShellData expectedShellData;
+
+ EXPECT_THAT(actualShellData, EqShellData(expectedShellData));
+}
+
+TEST_F(ShellSubscriberCallbackTest, testTruncateTimestampAtom) {
+ // Expect callback to be invoked once.
+ EXPECT_CALL(*callback, onSubscriptionData(_, _)).Times(Exactly(1));
+ EXPECT_CALL(
+ *mockLogEventFilter,
+ setAtomIds(CreateAtomIdSetFromShellSubscriptionBytes(configBytes), &shellSubscriber))
+ .Times(1);
+ shellSubscriber.startNewSubscription(configBytes, callback);
+
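+    // PHONE_SIGNAL_STRENGTH_CHANGED is created with the truncate-timestamp annotation (see
+    // CreatePhoneSignalStrengthChangedEvent in statsd_test_util.cpp), so the elapsed timestamp
+    // reported to the callback is expected to be rounded down, here to the 5-minute mark.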
+ shellSubscriber.onLogEvent(*CreatePhoneSignalStrengthChangedEvent(
+ NS_PER_SEC * 5 * 60 + 1000 /*timestamp*/,
+ ::android::telephony::SignalStrengthEnum::SIGNAL_STRENGTH_GOOD));
+
+ shellSubscriber.flushSubscription(callback);
+
+ // Get ShellData proto from the bytes payload of the callback.
+ ShellData actualShellData;
+ ASSERT_TRUE(actualShellData.ParseFromArray(payload.data(), payload.size()));
+
+ ShellData expectedShellData;
+ expectedShellData.add_atom()->mutable_phone_signal_strength_changed()->set_signal_strength(
+ ::android::telephony::SignalStrengthEnum::SIGNAL_STRENGTH_GOOD);
+ expectedShellData.add_elapsed_timestamp_nanos(NS_PER_SEC * 5 * 60);
+
+ EXPECT_THAT(actualShellData, EqShellData(expectedShellData));
+}
+
+TEST_F(ShellSubscriberCallbackPulledTest, testPullIfNeededBeforeInterval) {
+ // Pull should not happen
+ EXPECT_CALL(*pullerManager, Pull(_, A<const vector<int32_t>&>(), _, _)).Times(Exactly(0));
+
+ // Expect callback to not be invoked.
+ EXPECT_CALL(*callback, onSubscriptionData(_, _)).Times(Exactly(0));
+
+ shellSubscriberClient->pullAndSendHeartbeatsIfNeeded(/* nowSecs= */ 0, /* nowMillis= */ 0,
+ /* nowNanos= */ 0);
+}
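+
+// The nowSecs values used below (61 and 70) are assumed to sit just past the pull interval and
+// the cache timeout implied by getPulledConfig() and the ShellSubscriberClient defaults, which
+// are defined earlier in this file and in the client implementation.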
+
+TEST_F(ShellSubscriberCallbackPulledTest, testPullAtInterval) {
+ // Pull should happen once. The data is cached.
+ EXPECT_CALL(*pullerManager, Pull(_, A<const vector<int32_t>&>(), _, _)).Times(Exactly(1));
+
+ // Expect callback to not be invoked.
+ EXPECT_CALL(*callback, onSubscriptionData(_, _)).Times(Exactly(0));
+
+ // This pull should NOT trigger a cache flush.
+ shellSubscriberClient->pullAndSendHeartbeatsIfNeeded(/* nowSecs= */ 61, /* nowMillis= */ 61'000,
+ /* nowNanos= */ 61'000'000'000);
+}
+
+TEST_F(ShellSubscriberCallbackPulledTest, testCachedPullIsFlushed) {
+ // Pull should happen once. The data is cached.
+ EXPECT_CALL(*pullerManager, Pull(_, A<const vector<int32_t>&>(), _, _)).Times(Exactly(1));
+
+ // This pull should NOT trigger a cache flush.
+ shellSubscriberClient->pullAndSendHeartbeatsIfNeeded(/* nowSecs= */ 61, /* nowMillis= */ 61'000,
+ /* nowNanos= */ 61'000'000'000);
+
+ // Expect callback to be invoked once flush is requested.
+ EXPECT_CALL(*callback, onSubscriptionData(_, _)).Times(Exactly(1));
+
+ // This should flush out data cached from the pull.
+ shellSubscriberClient->flush();
+
+ EXPECT_THAT(reason, Eq(StatsSubscriptionCallbackReason::FLUSH_REQUESTED));
+
+ // Get ShellData proto from the bytes payload of the callback.
+ ShellData actualShellData;
+ ASSERT_TRUE(actualShellData.ParseFromArray(payload.data(), payload.size()));
+
+ EXPECT_THAT(actualShellData, EqShellData(getExpectedPulledData()));
+}
+
+TEST_F(ShellSubscriberCallbackPulledTest, testPullAtCacheTimeout) {
+ // Pull should happen once. The data is flushed.
+ EXPECT_CALL(*pullerManager, Pull(_, A<const vector<int32_t>&>(), _, _)).Times(Exactly(1));
+
+ // Expect callback to be invoked.
+ EXPECT_CALL(*callback, onSubscriptionData(_, _)).Times(Exactly(1));
+
+ // This pull should trigger a cache flush.
+ shellSubscriberClient->pullAndSendHeartbeatsIfNeeded(/* nowSecs= */ 70, /* nowMillis= */ 70'000,
+ /* nowNanos= */ 70'000'000'000);
+
+ EXPECT_THAT(reason, Eq(StatsSubscriptionCallbackReason::STATSD_INITIATED));
+
+ // Get ShellData proto from the bytes payload of the callback.
+ ShellData actualShellData;
+ ASSERT_TRUE(actualShellData.ParseFromArray(payload.data(), payload.size()));
+
+ EXPECT_THAT(actualShellData, EqShellData(getExpectedPulledData()));
+}
+
+TEST_F(ShellSubscriberCallbackPulledTest, testPullFrequencyTooShort) {
+ // Pull should NOT happen.
+ EXPECT_CALL(*pullerManager, Pull(_, A<const vector<int32_t>&>(), _, _)).Times(Exactly(0));
+
+ // This should not trigger a pull even though the timestamp passed in matches the pull interval
+ // specified in the config.
+    shellSubscriberClient->pullAndSendHeartbeatsIfNeeded(/* nowSecs= */ 2, /* nowMillis= */ 2000,
+                                                         /* nowNanos= */ 2'000'000'000);
+}
+
+TEST_F(ShellSubscriberCallbackPulledTest, testMinSleep) {
+ // Pull should NOT happen.
+ EXPECT_CALL(*pullerManager, Pull(_, A<const vector<int32_t>&>(), _, _)).Times(Exactly(0));
+
+ const int64_t sleepTimeMs =
+ shellSubscriberClient->pullAndSendHeartbeatsIfNeeded(59, 59'000, 59'000'000'000);
+
+ // Even though there is only 1000 ms left until the next pull, the sleep time returned is
+ // kMinCallbackSleepIntervalMs.
+ EXPECT_THAT(sleepTimeMs, Eq(ShellSubscriberClient::kMinCallbackSleepIntervalMs));
+}
+
+TEST(ShellSubscriberTest, testPushedSubscription) {
+ sp<MockUidMap> uidMap = new NaggyMock<MockUidMap>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
+
+ vector<std::shared_ptr<LogEvent>> pushedList = getPushedEvents();
+
+ // create a simple config to get screen events
+ ShellSubscription config;
+ config.add_pushed()->set_atom_id(SCREEN_STATE_CHANGED);
+
+    // these are the expected screen event atoms.
+ vector<ShellData> expectedData;
+ ShellData shellData1;
+ shellData1.add_atom()->mutable_screen_state_changed()->set_state(
+ ::android::view::DisplayStateEnum::DISPLAY_STATE_ON);
+ shellData1.add_elapsed_timestamp_nanos(pushedList[0]->GetElapsedTimestampNs());
+ ShellData shellData2;
+ shellData2.add_atom()->mutable_screen_state_changed()->set_state(
+ ::android::view::DisplayStateEnum::DISPLAY_STATE_OFF);
+ shellData2.add_elapsed_timestamp_nanos(pushedList[1]->GetElapsedTimestampNs());
+ expectedData.push_back(shellData1);
+ expectedData.push_back(shellData2);
+
+ // Test with single client
+ TRACE_CALL(runShellTest, config, uidMap, pullerManager, pushedList, expectedData,
+ kSingleClient);
+
+    // Test with multiple clients
+ TRACE_CALL(runShellTest, config, uidMap, pullerManager, pushedList, expectedData, kNumClients);
+}
+
TEST(ShellSubscriberTest, testPulledSubscription) {
sp<MockUidMap> uidMap = new NaggyMock<MockUidMap>();
-
sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
+
const vector<int32_t> uids = {AID_SYSTEM};
- EXPECT_CALL(*pullerManager, Pull(10016, uids, _, _))
+ EXPECT_CALL(*pullerManager, Pull(CPU_ACTIVE_TIME, uids, _, _))
.WillRepeatedly(Invoke([](int tagId, const vector<int32_t>&, const int64_t,
vector<std::shared_ptr<LogEvent>>* data) {
data->clear();
@@ -198,10 +662,237 @@
data->push_back(makeCpuActiveTimeAtom(/*uid=*/kUid2, /*timeMillis=*/kCpuTime2));
return true;
}));
- runShellTest(getPulledConfig(), uidMap, pullerManager, vector<std::shared_ptr<LogEvent>>(),
- getExpectedShellData());
+
+ // Test with single client
+ TRACE_CALL(runShellTest, getPulledConfig(), uidMap, pullerManager, /*pushedEvents=*/{},
+ {getExpectedPulledData()}, kSingleClient);
+
+ // Test with multiple clients.
+ TRACE_CALL(runShellTest, getPulledConfig(), uidMap, pullerManager, {},
+ {getExpectedPulledData()}, kNumClients);
+}
+
+TEST(ShellSubscriberTest, testBothSubscriptions) {
+ sp<MockUidMap> uidMap = new NaggyMock<MockUidMap>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
+
+ const vector<int32_t> uids = {AID_SYSTEM};
+ EXPECT_CALL(*pullerManager, Pull(CPU_ACTIVE_TIME, uids, _, _))
+ .WillRepeatedly(Invoke([](int tagId, const vector<int32_t>&, const int64_t,
+ vector<std::shared_ptr<LogEvent>>* data) {
+ data->clear();
+ data->push_back(makeCpuActiveTimeAtom(/*uid=*/kUid1, /*timeMillis=*/kCpuTime1));
+ data->push_back(makeCpuActiveTimeAtom(/*uid=*/kUid2, /*timeMillis=*/kCpuTime2));
+ return true;
+ }));
+
+ vector<std::shared_ptr<LogEvent>> pushedList = getPushedEvents();
+
+ ShellSubscription config = getPulledConfig();
+ config.add_pushed()->set_atom_id(SCREEN_STATE_CHANGED);
+
+ vector<ShellData> expectedData;
+ ShellData shellData1;
+ shellData1.add_atom()->mutable_screen_state_changed()->set_state(
+ ::android::view::DisplayStateEnum::DISPLAY_STATE_ON);
+ shellData1.add_elapsed_timestamp_nanos(pushedList[0]->GetElapsedTimestampNs());
+ ShellData shellData2;
+ shellData2.add_atom()->mutable_screen_state_changed()->set_state(
+ ::android::view::DisplayStateEnum::DISPLAY_STATE_OFF);
+ shellData2.add_elapsed_timestamp_nanos(pushedList[1]->GetElapsedTimestampNs());
+ expectedData.push_back(getExpectedPulledData());
+ expectedData.push_back(shellData1);
+ expectedData.push_back(shellData2);
+
+ // Test with single client
+ TRACE_CALL(runShellTest, config, uidMap, pullerManager, pushedList, expectedData,
+ kSingleClient);
+
+    // Test with multiple clients
+ TRACE_CALL(runShellTest, config, uidMap, pullerManager, pushedList, expectedData, kNumClients);
+}
+
+TEST(ShellSubscriberTest, testMaxSizeGuard) {
+ sp<MockUidMap> uidMap = new NaggyMock<MockUidMap>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
+ sp<ShellSubscriber> shellManager =
+ new ShellSubscriber(uidMap, pullerManager, /*LogEventFilter=*/nullptr);
+
+ // set up 2 pipes for read/write config and data
+ int fds_config[2];
+ ASSERT_EQ(0, pipe2(fds_config, O_CLOEXEC));
+
+ int fds_data[2];
+ ASSERT_EQ(0, pipe2(fds_data, O_CLOEXEC));
+
+    // write an invalid (too large) config size
+ size_t invalidBufferSize = (shellManager->getMaxSizeKb() * 1024) + 1;
+ write(fds_config[1], &invalidBufferSize, sizeof(invalidBufferSize));
+ close(fds_config[1]);
+ close(fds_data[0]);
+
+ EXPECT_FALSE(shellManager->startNewSubscription(fds_config[0], fds_data[1], /*timeoutSec=*/-1));
+ close(fds_config[0]);
+ close(fds_data[1]);
+}
+
+TEST(ShellSubscriberTest, testMaxSubscriptionsGuard) {
+ sp<MockUidMap> uidMap = new NaggyMock<MockUidMap>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
+ sp<ShellSubscriber> shellManager =
+ new ShellSubscriber(uidMap, pullerManager, /*LogEventFilter=*/nullptr);
+
+ // create a simple config to get screen events
+ ShellSubscription config;
+ config.add_pushed()->set_atom_id(SCREEN_STATE_CHANGED);
+
+ size_t bufferSize = config.ByteSize();
+ vector<uint8_t> buffer(bufferSize);
+ config.SerializeToArray(&buffer[0], bufferSize);
+
+ size_t maxSubs = shellManager->getMaxSubscriptions();
+ int fds_configs[maxSubs + 1][2];
+ int fds_datas[maxSubs + 1][2];
+    for (size_t i = 0; i < maxSubs; i++) {
+ // set up 2 pipes for read/write config and data
+ ASSERT_EQ(0, pipe2(fds_configs[i], O_CLOEXEC));
+ ASSERT_EQ(0, pipe2(fds_datas[i], O_CLOEXEC));
+
+ // write the config to pipe, first write size of the config
+ write(fds_configs[i][1], &bufferSize, sizeof(bufferSize));
+ // then write config itself
+ write(fds_configs[i][1], buffer.data(), bufferSize);
+ close(fds_configs[i][1]);
+
+ EXPECT_TRUE(shellManager->startNewSubscription(fds_configs[i][0], fds_datas[i][1],
+ /*timeoutSec=*/-1));
+ close(fds_configs[i][0]);
+ close(fds_datas[i][1]);
+ }
+ ASSERT_EQ(0, pipe2(fds_configs[maxSubs], O_CLOEXEC));
+ ASSERT_EQ(0, pipe2(fds_datas[maxSubs], O_CLOEXEC));
+
+ // write the config to pipe, first write size of the config
+ write(fds_configs[maxSubs][1], &bufferSize, sizeof(bufferSize));
+ // then write config itself
+ write(fds_configs[maxSubs][1], buffer.data(), bufferSize);
+ close(fds_configs[maxSubs][1]);
+
+ EXPECT_FALSE(shellManager->startNewSubscription(fds_configs[maxSubs][0], fds_datas[maxSubs][1],
+ /*timeoutSec=*/-1));
+ close(fds_configs[maxSubs][0]);
+ close(fds_datas[maxSubs][1]);
+
+ // Not closing fds_datas[i][0] because this causes writes within ShellSubscriberClient to hang
+}
+
+TEST(ShellSubscriberTest, testDifferentConfigs) {
+ sp<MockUidMap> uidMap = new NaggyMock<MockUidMap>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
+ sp<ShellSubscriber> shellManager =
+ new ShellSubscriber(uidMap, pullerManager, /*LogEventFilter=*/nullptr);
+
+ // number of different configs
+ int numConfigs = 2;
+
+ // create a simple config to get screen events
+ ShellSubscription configs[numConfigs];
+ configs[0].add_pushed()->set_atom_id(SCREEN_STATE_CHANGED);
+ configs[1].add_pushed()->set_atom_id(PLUGGED_STATE_CHANGED);
+
+ vector<vector<uint8_t>> configBuffers;
+ for (int i = 0; i < numConfigs; i++) {
+ size_t bufferSize = configs[i].ByteSize();
+ vector<uint8_t> buffer(bufferSize);
+ configs[i].SerializeToArray(&buffer[0], bufferSize);
+ configBuffers.push_back(buffer);
+ }
+
+ int fds_configs[numConfigs][2];
+ int fds_datas[numConfigs][2];
+ for (int i = 0; i < numConfigs; i++) {
+ // set up 2 pipes for read/write config and data
+ ASSERT_EQ(0, pipe2(fds_configs[i], O_CLOEXEC));
+ ASSERT_EQ(0, pipe2(fds_datas[i], O_CLOEXEC));
+
+ size_t configSize = configBuffers[i].size();
+ // write the config to pipe, first write size of the config
+ write(fds_configs[i][1], &configSize, sizeof(configSize));
+ // then write config itself
+ write(fds_configs[i][1], configBuffers[i].data(), configSize);
+ close(fds_configs[i][1]);
+
+ EXPECT_TRUE(shellManager->startNewSubscription(fds_configs[i][0], fds_datas[i][1],
+ /*timeoutSec=*/-1));
+ close(fds_configs[i][0]);
+ close(fds_datas[i][1]);
+ }
+
+    // send the log events that match the configs.
+ vector<std::shared_ptr<LogEvent>> pushedList = getPushedEvents();
+ for (const auto& event : pushedList) {
+ shellManager->onLogEvent(*event);
+ }
+
+ // Validate Config 1
+ ShellData actual1 = readData(fds_datas[0][0]);
+ ShellData expected1;
+ expected1.add_atom()->mutable_screen_state_changed()->set_state(
+ ::android::view::DisplayStateEnum::DISPLAY_STATE_ON);
+ expected1.add_elapsed_timestamp_nanos(pushedList[0]->GetElapsedTimestampNs());
+ EXPECT_THAT(expected1, EqShellData(actual1));
+
+ ShellData actual2 = readData(fds_datas[0][0]);
+ ShellData expected2;
+ expected2.add_atom()->mutable_screen_state_changed()->set_state(
+ ::android::view::DisplayStateEnum::DISPLAY_STATE_OFF);
+ expected2.add_elapsed_timestamp_nanos(pushedList[1]->GetElapsedTimestampNs());
+ EXPECT_THAT(expected2, EqShellData(actual2));
+
+ // Validate Config 2, repeating the process
+ ShellData actual3 = readData(fds_datas[1][0]);
+ ShellData expected3;
+ expected3.add_atom()->mutable_plugged_state_changed()->set_state(
+ BatteryPluggedStateEnum::BATTERY_PLUGGED_USB);
+ expected3.add_elapsed_timestamp_nanos(pushedList[2]->GetElapsedTimestampNs());
+ EXPECT_THAT(expected3, EqShellData(actual3));
+
+ ShellData actual4 = readData(fds_datas[1][0]);
+ ShellData expected4;
+ expected4.add_atom()->mutable_plugged_state_changed()->set_state(
+ BatteryPluggedStateEnum::BATTERY_PLUGGED_NONE);
+ expected4.add_elapsed_timestamp_nanos(pushedList[3]->GetElapsedTimestampNs());
+ EXPECT_THAT(expected4, EqShellData(actual4));
+
+ // Not closing fds_datas[i][0] because this causes writes within ShellSubscriberClient to hang
+}
+
+TEST(ShellSubscriberTest, testPushedSubscriptionRestrictedEvent) {
+ sp<MockUidMap> uidMap = new NaggyMock<MockUidMap>();
+ sp<MockStatsPullerManager> pullerManager = new StrictMock<MockStatsPullerManager>();
+
+ std::vector<shared_ptr<LogEvent>> pushedList;
+ pushedList.push_back(CreateRestrictedLogEvent(/*atomTag=*/10, /*timestamp=*/1000));
+
+ // create a simple config to get screen events
+ ShellSubscription config;
+ config.add_pushed()->set_atom_id(10);
+
+ // expect empty data
+ vector<ShellData> expectedData;
+
+ // Test with single client
+ TRACE_CALL(runShellTest, config, uidMap, pullerManager, pushedList, expectedData,
+ kSingleClient);
+
+    // Test with multiple clients
+ TRACE_CALL(runShellTest, config, uidMap, pullerManager, pushedList, expectedData, kNumClients);
}
#else
GTEST_LOG_(INFO) << "This test does nothing.\n";
#endif
+
+} // namespace statsd
+} // namespace os
+} // namespace android
diff --git a/statsd/tests/state/StateTracker_test.cpp b/statsd/tests/state/StateTracker_test.cpp
index 6516c15..d2fbb3f 100644
--- a/statsd/tests/state/StateTracker_test.cpp
+++ b/statsd/tests/state/StateTracker_test.cpp
@@ -113,6 +113,7 @@
mgr.registerListener(util::SCREEN_STATE_CHANGED, listener1);
EXPECT_EQ(1, mgr.getStateTrackersCount());
EXPECT_EQ(1, StateManager::getInstance().getStateTrackersCount());
+ mgr.unregisterListener(util::SCREEN_STATE_CHANGED, listener1);
}
TEST(StateManagerTest, TestOnLogEvent) {
diff --git a/statsd/tests/statsd_test_util.cpp b/statsd/tests/statsd_test_util.cpp
index 75e4904..db21817 100644
--- a/statsd/tests/statsd_test_util.cpp
+++ b/statsd/tests/statsd_test_util.cpp
@@ -20,7 +20,6 @@
#include <android-base/stringprintf.h>
#include "matchers/SimpleAtomMatchingTracker.h"
-#include "stats_annotations.h"
#include "stats_event.h"
#include "stats_util.h"
@@ -34,6 +33,25 @@
namespace os {
namespace statsd {
+bool StatsServiceConfigTest::sendConfig(const StatsdConfig& config) {
+ string str;
+ config.SerializeToString(&str);
+ std::vector<uint8_t> configAsVec(str.begin(), str.end());
+ return service->addConfiguration(kConfigKey, configAsVec, kCallingUid).isOk();
+}
+
+ConfigMetricsReport StatsServiceConfigTest::getReports(sp<StatsLogProcessor> processor,
+ int64_t timestamp, bool include_current) {
+ vector<uint8_t> output;
+ ConfigKey configKey(kCallingUid, kConfigKey);
+ processor->onDumpReport(configKey, timestamp, include_current /* include_current_bucket*/,
+ true /* erase_data */, ADB_DUMP, NO_TIME_CONSTRAINTS, &output);
+ ConfigMetricsReportList reports;
+ reports.ParseFromArray(output.data(), output.size());
+ EXPECT_EQ(1, reports.reports_size());
+ return reports.reports(0);
+}
+
StatsLogReport outputStreamToProto(ProtoOutputStream* proto) {
vector<uint8_t> bytes;
bytes.resize(proto->size());
@@ -86,6 +104,11 @@
ScheduledJobStateChanged::FINISHED);
}
+AtomMatcher CreateScheduleScheduledJobAtomMatcher() {
+ return CreateScheduledJobStateChangedAtomMatcher("ScheduledJobSchedule",
+ ScheduledJobStateChanged::SCHEDULED);
+}
+
AtomMatcher CreateScreenBrightnessChangedAtomMatcher() {
AtomMatcher atom_matcher;
atom_matcher.set_id(StringToId("ScreenBrightnessChanged"));
@@ -255,10 +278,8 @@
AtomMatcher CreateTestAtomRepeatedStateAtomMatcher(const string& name,
TestAtomReported::State state,
Position position) {
- AtomMatcher atom_matcher;
- atom_matcher.set_id(StringToId(name));
+ AtomMatcher atom_matcher = CreateSimpleAtomMatcher(name, util::TEST_ATOM_REPORTED);
auto simple_atom_matcher = atom_matcher.mutable_simple_atom_matcher();
- simple_atom_matcher->set_atom_id(util::TEST_ATOM_REPORTED);
auto field_value_matcher = simple_atom_matcher->add_field_value_matcher();
field_value_matcher->set_field(14); // Repeated enum field.
field_value_matcher->set_eq_int(state);
@@ -319,7 +340,7 @@
Predicate CreateScreenIsOffPredicate() {
Predicate predicate;
- predicate.set_id(1111123);
+ predicate.set_id(StringToId("ScreenIsOff"));
predicate.mutable_simple_predicate()->set_start(StringToId("ScreenTurnedOff"));
predicate.mutable_simple_predicate()->set_stop(StringToId("ScreenTurnedOn"));
return predicate;
@@ -335,7 +356,7 @@
Predicate CreateIsSyncingPredicate() {
Predicate predicate;
- predicate.set_id(33333333333333);
+ predicate.set_id(StringToId("IsSyncing"));
predicate.mutable_simple_predicate()->set_start(StringToId("SyncStart"));
predicate.mutable_simple_predicate()->set_stop(StringToId("SyncEnd"));
return predicate;
@@ -592,14 +613,14 @@
return metric;
}
-KllMetric createKllMetric(const string& name, const AtomMatcher& what, const int valueField,
+KllMetric createKllMetric(const string& name, const AtomMatcher& what, const int kllField,
const optional<int64_t>& condition) {
KllMetric metric;
metric.set_id(StringToId(name));
metric.set_what(what.id());
metric.set_bucket(TEN_MINUTES);
metric.mutable_kll_field()->set_field(what.simple_atom_matcher().atom_id());
- metric.mutable_kll_field()->add_child()->set_field(valueField);
+ metric.mutable_kll_field()->add_child()->set_field(kllField);
if (condition) {
metric.set_condition(condition.value());
}
@@ -790,7 +811,7 @@
AStatsEvent_overwriteTimestamp(statsEvent, eventTimeNs);
AStatsEvent_writeInt32(statsEvent, uid);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_IS_UID, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_IS_UID, true);
AStatsEvent_writeInt32(statsEvent, data1);
AStatsEvent_writeInt32(statsEvent, data2);
@@ -803,7 +824,7 @@
AStatsEvent_setAtomId(statsEvent, atomId);
AStatsEvent_overwriteTimestamp(statsEvent, eventTimeNs);
AStatsEvent_writeInt32(statsEvent, uid);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_IS_UID, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_IS_UID, true);
AStatsEvent_writeInt32(statsEvent, data1);
AStatsEvent_writeInt32Array(statsEvent, data2.data(), data2.size());
@@ -833,7 +854,7 @@
AStatsEvent* statsEvent = makeUidStatsEvent(atomId, eventTimeNs, uid1, data1, data2);
for (const int extraUid : extraUids) {
AStatsEvent_writeInt32(statsEvent, extraUid);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_IS_UID, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_IS_UID, true);
}
shared_ptr<LogEvent> logEvent = std::make_shared<LogEvent>(/*uid=*/0, /*pid=*/0);
@@ -847,7 +868,7 @@
AStatsEvent_setAtomId(statsEvent, atomId);
AStatsEvent_overwriteTimestamp(statsEvent, eventTimeNs);
AStatsEvent_writeInt32Array(statsEvent, uids.data(), uids.size());
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_IS_UID, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_IS_UID, true);
shared_ptr<LogEvent> logEvent = std::make_shared<LogEvent>(/*uid=*/0, /*pid=*/0);
parseStatsEventToLogEvent(statsEvent, logEvent.get());
@@ -861,7 +882,7 @@
AStatsEvent_setAtomId(statsEvent, atomId);
AStatsEvent_overwriteTimestamp(statsEvent, eventTimeNs);
AStatsEvent_writeInt32Array(statsEvent, uids.data(), uids.size());
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_IS_UID, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_IS_UID, true);
AStatsEvent_writeInt32(statsEvent, data1);
AStatsEvent_writeInt32(statsEvent, data2);
@@ -878,7 +899,7 @@
AStatsEvent_setAtomId(statsEvent, atomId);
AStatsEvent_overwriteTimestamp(statsEvent, eventTimeNs);
AStatsEvent_writeInt32Array(statsEvent, uids.data(), uids.size());
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_IS_UID, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_IS_UID, true);
AStatsEvent_writeInt32(statsEvent, data1);
AStatsEvent_writeInt32Array(statsEvent, data2.data(), data2.size());
@@ -931,8 +952,8 @@
AStatsEvent_setAtomId(statsEvent, util::SCREEN_STATE_CHANGED);
AStatsEvent_overwriteTimestamp(statsEvent, timestampNs);
AStatsEvent_writeInt32(statsEvent, state);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_EXCLUSIVE_STATE, true);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_STATE_NESTED, false);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_EXCLUSIVE_STATE, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_STATE_NESTED, false);
std::unique_ptr<LogEvent> logEvent = std::make_unique<LogEvent>(loggerUid, /*pid=*/0);
parseStatsEventToLogEvent(statsEvent, logEvent.get());
@@ -944,8 +965,8 @@
AStatsEvent_setAtomId(statsEvent, util::BATTERY_SAVER_MODE_STATE_CHANGED);
AStatsEvent_overwriteTimestamp(statsEvent, timestampNs);
AStatsEvent_writeInt32(statsEvent, BatterySaverModeStateChanged::ON);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_EXCLUSIVE_STATE, true);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_STATE_NESTED, false);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_EXCLUSIVE_STATE, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_STATE_NESTED, false);
std::unique_ptr<LogEvent> logEvent = std::make_unique<LogEvent>(/*uid=*/0, /*pid=*/0);
parseStatsEventToLogEvent(statsEvent, logEvent.get());
@@ -957,8 +978,8 @@
AStatsEvent_setAtomId(statsEvent, util::BATTERY_SAVER_MODE_STATE_CHANGED);
AStatsEvent_overwriteTimestamp(statsEvent, timestampNs);
AStatsEvent_writeInt32(statsEvent, BatterySaverModeStateChanged::OFF);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_EXCLUSIVE_STATE, true);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_STATE_NESTED, false);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_EXCLUSIVE_STATE, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_STATE_NESTED, false);
std::unique_ptr<LogEvent> logEvent = std::make_unique<LogEvent>(/*uid=*/0, /*pid=*/0);
parseStatsEventToLogEvent(statsEvent, logEvent.get());
@@ -1020,6 +1041,15 @@
ScheduledJobStateChanged::FINISHED, timestampNs);
}
+// Create log event when scheduled job is scheduled.
+std::unique_ptr<LogEvent> CreateScheduleScheduledJobEvent(uint64_t timestampNs,
+ const vector<int>& attributionUids,
+ const vector<string>& attributionTags,
+ const string& jobName) {
+ return CreateScheduledJobStateChangedEvent(attributionUids, attributionTags, jobName,
+ ScheduledJobStateChanged::SCHEDULED, timestampNs);
+}
+
std::unique_ptr<LogEvent> CreateTestAtomReportedEventVariableRepeatedFields(
uint64_t timestampNs, const vector<int>& repeatedIntField,
const vector<int64_t>& repeatedLongField, const vector<float>& repeatedFloatField,
@@ -1081,14 +1111,15 @@
AStatsEvent_overwriteTimestamp(statsEvent, timestampNs);
writeAttribution(statsEvent, attributionUids, attributionTags);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_PRIMARY_FIELD_FIRST_UID, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_PRIMARY_FIELD_FIRST_UID,
+ true);
AStatsEvent_writeInt32(statsEvent, android::os::WakeLockLevelEnum::PARTIAL_WAKE_LOCK);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_PRIMARY_FIELD, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_PRIMARY_FIELD, true);
AStatsEvent_writeString(statsEvent, wakelockName.c_str());
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_PRIMARY_FIELD, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_PRIMARY_FIELD, true);
AStatsEvent_writeInt32(statsEvent, state);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_EXCLUSIVE_STATE, true);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_STATE_NESTED, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_EXCLUSIVE_STATE, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_STATE_NESTED, true);
std::unique_ptr<LogEvent> logEvent = std::make_unique<LogEvent>(/*uid=*/0, /*pid=*/0);
parseStatsEventToLogEvent(statsEvent, logEvent.get());
@@ -1227,11 +1258,11 @@
AStatsEvent_overwriteTimestamp(statsEvent, timestampNs);
AStatsEvent_writeInt32(statsEvent, uid);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_IS_UID, true);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_PRIMARY_FIELD, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_IS_UID, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_PRIMARY_FIELD, true);
AStatsEvent_writeInt32(statsEvent, state);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_EXCLUSIVE_STATE, true);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_STATE_NESTED, false);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_EXCLUSIVE_STATE, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_STATE_NESTED, false);
std::unique_ptr<LogEvent> logEvent = std::make_unique<LogEvent>(/*uid=*/0, /*pid=*/0);
parseStatsEventToLogEvent(statsEvent, logEvent.get());
@@ -1249,20 +1280,21 @@
AStatsEvent_overwriteTimestamp(statsEvent, timestampNs);
writeAttribution(statsEvent, attributionUids, attributionTags);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_PRIMARY_FIELD_FIRST_UID, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_PRIMARY_FIELD_FIRST_UID,
+ true);
AStatsEvent_writeInt32(statsEvent, state);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_EXCLUSIVE_STATE, true);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_STATE_NESTED, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_EXCLUSIVE_STATE, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_STATE_NESTED, true);
if (state == util::BLE_SCAN_STATE_CHANGED__STATE__RESET) {
- AStatsEvent_addInt32Annotation(statsEvent, ANNOTATION_ID_TRIGGER_STATE_RESET,
+ AStatsEvent_addInt32Annotation(statsEvent, ASTATSLOG_ANNOTATION_ID_TRIGGER_STATE_RESET,
util::BLE_SCAN_STATE_CHANGED__STATE__OFF);
}
AStatsEvent_writeBool(statsEvent, filtered); // filtered
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_PRIMARY_FIELD, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_PRIMARY_FIELD, true);
AStatsEvent_writeBool(statsEvent, firstMatch); // first match
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_PRIMARY_FIELD, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_PRIMARY_FIELD, true);
AStatsEvent_writeBool(statsEvent, opportunistic); // opportunistic
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_PRIMARY_FIELD, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_PRIMARY_FIELD, true);
std::unique_ptr<LogEvent> logEvent = std::make_unique<LogEvent>(/*uid=*/0, /*pid=*/0);
parseStatsEventToLogEvent(statsEvent, logEvent.get());
@@ -1278,14 +1310,14 @@
AStatsEvent_overwriteTimestamp(statsEvent, timestampNs);
AStatsEvent_writeInt32(statsEvent, uid);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_IS_UID, true);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_PRIMARY_FIELD, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_IS_UID, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_PRIMARY_FIELD, true);
AStatsEvent_writeString(statsEvent, packageName.c_str());
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_PRIMARY_FIELD, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_PRIMARY_FIELD, true);
AStatsEvent_writeBool(statsEvent, usingAlertWindow);
AStatsEvent_writeInt32(statsEvent, state);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_EXCLUSIVE_STATE, true);
- AStatsEvent_addBoolAnnotation(statsEvent, ANNOTATION_ID_STATE_NESTED, false);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_EXCLUSIVE_STATE, true);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_STATE_NESTED, false);
std::unique_ptr<LogEvent> logEvent = std::make_unique<LogEvent>(/*uid=*/0, /*pid=*/0);
parseStatsEventToLogEvent(statsEvent, logEvent.get());
@@ -1313,10 +1345,62 @@
return logEvent;
}
+std::unique_ptr<LogEvent> CreateBleScanResultReceivedEvent(uint64_t timestampNs,
+ const vector<int>& attributionUids,
+ const vector<string>& attributionTags,
+ const int numResults) {
+ AStatsEvent* statsEvent = AStatsEvent_obtain();
+ AStatsEvent_setAtomId(statsEvent, util::BLE_SCAN_RESULT_RECEIVED);
+ AStatsEvent_overwriteTimestamp(statsEvent, timestampNs);
+
+ writeAttribution(statsEvent, attributionUids, attributionTags);
+ AStatsEvent_writeInt32(statsEvent, numResults);
+
+ std::unique_ptr<LogEvent> logEvent = std::make_unique<LogEvent>(/*uid=*/0, /*pid=*/0);
+ parseStatsEventToLogEvent(statsEvent, logEvent.get());
+ return logEvent;
+}
+
+std::unique_ptr<LogEvent> CreateRestrictedLogEvent(int atomTag, int64_t timestampNs) {
+ AStatsEvent* statsEvent = AStatsEvent_obtain();
+ AStatsEvent_setAtomId(statsEvent, atomTag);
+ AStatsEvent_addInt32Annotation(statsEvent, ASTATSLOG_ANNOTATION_ID_RESTRICTION_CATEGORY,
+ ASTATSLOG_RESTRICTION_CATEGORY_DIAGNOSTIC);
+ AStatsEvent_writeInt32(statsEvent, 10);
+ AStatsEvent_overwriteTimestamp(statsEvent, timestampNs);
+ std::unique_ptr<LogEvent> logEvent = std::make_unique<LogEvent>(/*uid=*/0, /*pid=*/0);
+ parseStatsEventToLogEvent(statsEvent, logEvent.get());
+ return logEvent;
+}
+
+std::unique_ptr<LogEvent> CreateNonRestrictedLogEvent(int atomTag, int64_t timestampNs) {
+ AStatsEvent* statsEvent = AStatsEvent_obtain();
+ AStatsEvent_setAtomId(statsEvent, atomTag);
+ AStatsEvent_writeInt32(statsEvent, 10);
+ AStatsEvent_overwriteTimestamp(statsEvent, timestampNs);
+ std::unique_ptr<LogEvent> logEvent = std::make_unique<LogEvent>(/*uid=*/0, /*pid=*/0);
+ parseStatsEventToLogEvent(statsEvent, logEvent.get());
+ return logEvent;
+}
+
+std::unique_ptr<LogEvent> CreatePhoneSignalStrengthChangedEvent(
+ int64_t timestampNs, ::telephony::SignalStrengthEnum state) {
+ AStatsEvent* statsEvent = AStatsEvent_obtain();
+ AStatsEvent_setAtomId(statsEvent, util::PHONE_SIGNAL_STRENGTH_CHANGED);
+ AStatsEvent_addBoolAnnotation(statsEvent, ASTATSLOG_ANNOTATION_ID_TRUNCATE_TIMESTAMP, true);
+ AStatsEvent_overwriteTimestamp(statsEvent, timestampNs);
+ AStatsEvent_writeInt32(statsEvent, state);
+
+ std::unique_ptr<LogEvent> logEvent = std::make_unique<LogEvent>(/*uid=*/0, /*pid=*/0);
+ parseStatsEventToLogEvent(statsEvent, logEvent.get());
+ return logEvent;
+}
+
sp<StatsLogProcessor> CreateStatsLogProcessor(const int64_t timeBaseNs, const int64_t currentTimeNs,
const StatsdConfig& config, const ConfigKey& key,
const shared_ptr<IPullAtomCallback>& puller,
- const int32_t atomTag, const sp<UidMap> uidMap) {
+ const int32_t atomTag, const sp<UidMap> uidMap,
+ const shared_ptr<LogEventFilter>& logEventFilter) {
sp<StatsPullerManager> pullerManager = new StatsPullerManager();
if (puller != nullptr) {
pullerManager->RegisterPullAtomCallback(/*uid=*/0, atomTag, NS_PER_SEC, NS_PER_SEC * 10, {},
@@ -1330,14 +1414,45 @@
new AlarmMonitor(1,
[](const shared_ptr<IStatsCompanionService>&, int64_t){},
[](const shared_ptr<IStatsCompanionService>&){});
- sp<StatsLogProcessor> processor =
- new StatsLogProcessor(uidMap, pullerManager, anomalyAlarmMonitor, periodicAlarmMonitor,
- timeBaseNs, [](const ConfigKey&) { return true; },
- [](const int&, const vector<int64_t>&) {return true;});
+ sp<StatsLogProcessor> processor = new StatsLogProcessor(
+ uidMap, pullerManager, anomalyAlarmMonitor, periodicAlarmMonitor, timeBaseNs,
+ [](const ConfigKey&) { return true; },
+ [](const int&, const vector<int64_t>&) { return true; },
+ [](const ConfigKey&, const string&, const vector<int64_t>&) {}, logEventFilter);
+
processor->OnConfigUpdated(currentTimeNs, key, config);
return processor;
}
+LogEventFilter::AtomIdSet CreateAtomIdSetDefault() {
+ LogEventFilter::AtomIdSet resultList(std::move(StatsLogProcessor::getDefaultAtomIdSet()));
+ StateManager::getInstance().addAllAtomIds(resultList);
+ return resultList;
+}
+
+LogEventFilter::AtomIdSet CreateAtomIdSetFromConfig(const StatsdConfig& config) {
+ LogEventFilter::AtomIdSet resultList(std::move(StatsLogProcessor::getDefaultAtomIdSet()));
+
+    // Parse the config for atom ids. A combination atom matcher ultimately resolves to simple
+    // atom matchers, so collecting the atom ids from all simple atom matchers covers every atom
+    // referenced by the config's matchers.
+ for (int i = 0; i < config.atom_matcher_size(); i++) {
+ const AtomMatcher& matcher = config.atom_matcher(i);
+ if (matcher.has_simple_atom_matcher()) {
+ EXPECT_TRUE(matcher.simple_atom_matcher().has_atom_id());
+ resultList.insert(matcher.simple_atom_matcher().atom_id());
+ }
+ }
+
+ for (int i = 0; i < config.state_size(); i++) {
+ const State& state = config.state(i);
+ EXPECT_TRUE(state.has_atom_id());
+ resultList.insert(state.atom_id());
+ }
+
+ return resultList;
+}
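+
+// Atoms referenced only through config.state entries must also be present in the filter set,
+// which is why the loop above adds state atom ids in addition to the simple atom matcher ids.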
+
void sortLogEventsByTimestamp(std::vector<std::unique_ptr<LogEvent>> *events) {
std::sort(events->begin(), events->end(),
[](const std::unique_ptr<LogEvent>& a, const std::unique_ptr<LogEvent>& b) {
@@ -1487,17 +1602,19 @@
}
void ValidateCountBucket(const CountBucketInfo& countBucket, int64_t startTimeNs, int64_t endTimeNs,
- int64_t count) {
+ int64_t count, int64_t conditionTrueNs) {
EXPECT_EQ(countBucket.start_bucket_elapsed_nanos(), startTimeNs);
EXPECT_EQ(countBucket.end_bucket_elapsed_nanos(), endTimeNs);
EXPECT_EQ(countBucket.count(), count);
+ EXPECT_EQ(countBucket.condition_true_nanos(), conditionTrueNs);
}
void ValidateDurationBucket(const DurationBucketInfo& bucket, int64_t startTimeNs,
- int64_t endTimeNs, int64_t durationNs) {
+ int64_t endTimeNs, int64_t durationNs, int64_t conditionTrueNs) {
EXPECT_EQ(bucket.start_bucket_elapsed_nanos(), startTimeNs);
EXPECT_EQ(bucket.end_bucket_elapsed_nanos(), endTimeNs);
EXPECT_EQ(bucket.duration_nanos(), durationNs);
+ EXPECT_EQ(bucket.condition_true_nanos(), conditionTrueNs);
}
void ValidateGaugeBucketTimes(const GaugeBucketInfo& gaugeBucket, int64_t startTimeNs,
@@ -1692,6 +1809,8 @@
backfillStringInDimension(str_map, metric_report->mutable_gauge_metrics());
} else if (metric_report->has_value_metrics()) {
backfillStringInDimension(str_map, metric_report->mutable_value_metrics());
+ } else if (metric_report->has_kll_metrics()) {
+ backfillStringInDimension(str_map, metric_report->mutable_kll_metrics());
}
}
// Backfill the package names.
@@ -1764,20 +1883,20 @@
}
void backfillDimensionPath(StatsLogReport* report) {
- if (report->has_dimensions_path_in_what() || report->has_dimensions_path_in_condition()) {
+ if (report->has_dimensions_path_in_what()) {
auto whatPath = report->dimensions_path_in_what();
- auto conditionPath = report->dimensions_path_in_condition();
if (report->has_count_metrics()) {
- backfillDimensionPath(whatPath, conditionPath, report->mutable_count_metrics());
+ backfillDimensionPath(whatPath, report->mutable_count_metrics());
} else if (report->has_duration_metrics()) {
- backfillDimensionPath(whatPath, conditionPath, report->mutable_duration_metrics());
+ backfillDimensionPath(whatPath, report->mutable_duration_metrics());
} else if (report->has_gauge_metrics()) {
- backfillDimensionPath(whatPath, conditionPath, report->mutable_gauge_metrics());
+ backfillDimensionPath(whatPath, report->mutable_gauge_metrics());
} else if (report->has_value_metrics()) {
- backfillDimensionPath(whatPath, conditionPath, report->mutable_value_metrics());
+ backfillDimensionPath(whatPath, report->mutable_value_metrics());
+ } else if (report->has_kll_metrics()) {
+ backfillDimensionPath(whatPath, report->mutable_kll_metrics());
}
report->clear_dimensions_path_in_what();
- report->clear_dimensions_path_in_condition();
}
}
@@ -1972,6 +2091,131 @@
flagName.c_str()),
flagValue);
}
+
+PackageInfoSnapshot getPackageInfoSnapshot(const sp<UidMap> uidMap) {
+ ProtoOutputStream protoOutputStream;
+ uidMap->writeUidMapSnapshot(/* timestamp */ 1, /* includeVersionStrings */ true,
+ /* includeInstaller */ true, /* certificateHashSize */ UINT8_MAX,
+ /* interestingUids */ {},
+ /* installerIndices */ nullptr, /* str_set */ nullptr,
+ &protoOutputStream);
+
+ PackageInfoSnapshot packageInfoSnapshot;
+ outputStreamToProto(&protoOutputStream, &packageInfoSnapshot);
+ return packageInfoSnapshot;
+}
+
+PackageInfo buildPackageInfo(const string& name, const int32_t uid, const int64_t version,
+ const string& versionString, const optional<string> installer,
+ const vector<uint8_t>& certHash, const bool deleted,
+ const bool hashStrings, const optional<uint32_t> installerIndex) {
+ PackageInfo packageInfo;
+ packageInfo.set_version(version);
+ packageInfo.set_uid(uid);
+ packageInfo.set_deleted(deleted);
+ if (!certHash.empty()) {
+ packageInfo.set_truncated_certificate_hash(certHash.data(), certHash.size());
+ }
+ if (hashStrings) {
+ packageInfo.set_name_hash(Hash64(name));
+ packageInfo.set_version_string_hash(Hash64(versionString));
+ } else {
+ packageInfo.set_name(name);
+ packageInfo.set_version_string(versionString);
+ }
+ if (installer) {
+ if (installerIndex) {
+ packageInfo.set_installer_index(*installerIndex);
+ } else if (hashStrings) {
+ packageInfo.set_installer_hash(Hash64(*installer));
+ } else {
+ packageInfo.set_installer(*installer);
+ }
+ }
+ return packageInfo;
+}
+
+vector<PackageInfo> buildPackageInfos(
+ const vector<string>& names, const vector<int32_t>& uids, const vector<int64_t>& versions,
+ const vector<string>& versionStrings, const vector<string>& installers,
+ const vector<vector<uint8_t>>& certHashes, const vector<bool>& deleted,
+ const vector<uint32_t>& installerIndices, const bool hashStrings) {
+ vector<PackageInfo> packageInfos;
+ for (int i = 0; i < uids.size(); i++) {
+ const optional<uint32_t> installerIndex =
+ installerIndices.empty() ? nullopt : optional<uint32_t>(installerIndices[i]);
+ const optional<string> installer =
+ installers.empty() ? nullopt : optional<string>(installers[i]);
+ vector<uint8_t> certHash;
+ if (!certHashes.empty()) {
+ certHash = certHashes[i];
+ }
+ packageInfos.emplace_back(buildPackageInfo(names[i], uids[i], versions[i],
+ versionStrings[i], installer, certHash,
+ deleted[i], hashStrings, installerIndex));
+ }
+ return packageInfos;
+}
+
+StatsdStatsReport_PulledAtomStats getPulledAtomStats(int32_t atom_id) {
+ vector<uint8_t> statsBuffer;
+ StatsdStats::getInstance().dumpStats(&statsBuffer, false /*reset stats*/);
+ StatsdStatsReport statsReport;
+ StatsdStatsReport_PulledAtomStats pulledAtomStats;
+ if (!statsReport.ParseFromArray(&statsBuffer[0], statsBuffer.size())) {
+ return pulledAtomStats;
+ }
+ if (statsReport.pulled_atom_stats_size() == 0) {
+ return pulledAtomStats;
+ }
+ for (size_t i = 0; i < statsReport.pulled_atom_stats_size(); i++) {
+ if (statsReport.pulled_atom_stats(i).atom_id() == atom_id) {
+ return statsReport.pulled_atom_stats(i);
+ }
+ }
+ return pulledAtomStats;
+}
+
+void createStatsEvent(AStatsEvent* statsEvent, uint8_t typeId, uint32_t atomId) {
+ AStatsEvent_setAtomId(statsEvent, atomId);
+ fillStatsEventWithSampleValue(statsEvent, typeId);
+}
+
+void fillStatsEventWithSampleValue(AStatsEvent* statsEvent, uint8_t typeId) {
+ int int32Array[2] = {3, 6};
+ uint32_t uids[] = {1001, 1002};
+ const char* tags[] = {"tag1", "tag2"};
+
+ switch (typeId) {
+ case INT32_TYPE:
+ AStatsEvent_writeInt32(statsEvent, 10);
+ break;
+ case INT64_TYPE:
+ AStatsEvent_writeInt64(statsEvent, 1000L);
+ break;
+ case STRING_TYPE:
+ AStatsEvent_writeString(statsEvent, "test");
+ break;
+ case LIST_TYPE:
+ AStatsEvent_writeInt32Array(statsEvent, int32Array, 2);
+ break;
+ case FLOAT_TYPE:
+ AStatsEvent_writeFloat(statsEvent, 1.3f);
+ break;
+ case BOOL_TYPE:
+ AStatsEvent_writeBool(statsEvent, 1);
+ break;
+ case BYTE_ARRAY_TYPE:
+ AStatsEvent_writeByteArray(statsEvent, (uint8_t*)"test", strlen("test"));
+ break;
+ case ATTRIBUTION_CHAIN_TYPE:
+ AStatsEvent_writeAttributionChain(statsEvent, uids, tags, 2);
+ break;
+ default:
+ break;
+ }
+}
+
} // namespace statsd
} // namespace os
} // namespace android
diff --git a/statsd/tests/statsd_test_util.h b/statsd/tests/statsd_test_util.h
index afafb5f..cba3ebf 100644
--- a/statsd/tests/statsd_test_util.h
+++ b/statsd/tests/statsd_test_util.h
@@ -16,12 +16,16 @@
#include <aidl/android/os/BnPendingIntentRef.h>
#include <aidl/android/os/BnPullAtomCallback.h>
+#include <aidl/android/os/BnStatsQueryCallback.h>
+#include <aidl/android/os/BnStatsSubscriptionCallback.h>
#include <aidl/android/os/IPullAtomCallback.h>
#include <aidl/android/os/IPullAtomResultReceiver.h>
+#include <aidl/android/os/StatsSubscriptionCallbackReason.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "src/StatsLogProcessor.h"
+#include "src/StatsService.h"
#include "src/flags/FlagProvider.h"
#include "src/hash.h"
#include "src/logd/LogEvent.h"
@@ -30,6 +34,7 @@
#include "src/stats_log.pb.h"
#include "src/stats_log_util.h"
#include "src/statsd_config.pb.h"
+#include "stats_annotations.h"
#include "stats_event.h"
#include "statslog_statsdtest.h"
@@ -39,11 +44,17 @@
using namespace testing;
using ::aidl::android::os::BnPullAtomCallback;
+using ::aidl::android::os::BnStatsQueryCallback;
+using ::aidl::android::os::BnStatsSubscriptionCallback;
using ::aidl::android::os::IPullAtomCallback;
using ::aidl::android::os::IPullAtomResultReceiver;
+using ::aidl::android::os::StatsSubscriptionCallbackReason;
using android::util::ProtoReader;
using google::protobuf::RepeatedPtrField;
using Status = ::ndk::ScopedAStatus;
+using PackageInfoSnapshot = UidMapping_PackageInfoSnapshot;
+using PackageInfo = UidMapping_PackageInfoSnapshot_PackageInfo;
+using ::ndk::SharedRefBase;
// Wrapper for assertion helpers called from tests to keep track of source location of failures.
// Example usage:
@@ -75,16 +86,86 @@
MOCK_METHOD(std::set<int32_t>, getAppUid, (const string& package), (const));
};
+class BasicMockLogEventFilter : public LogEventFilter {
+public:
+ MOCK_METHOD(void, setFilteringEnabled, (bool isEnabled), (override));
+ MOCK_METHOD(void, setAtomIds, (AtomIdSet tagIds, ConsumerId consumer), (override));
+};
+
class MockPendingIntentRef : public aidl::android::os::BnPendingIntentRef {
public:
MOCK_METHOD1(sendDataBroadcast, Status(int64_t lastReportTimeNs));
MOCK_METHOD1(sendActiveConfigsChangedBroadcast, Status(const vector<int64_t>& configIds));
+ MOCK_METHOD1(sendRestrictedMetricsChangedBroadcast, Status(const vector<int64_t>& metricIds));
MOCK_METHOD6(sendSubscriberBroadcast,
Status(int64_t configUid, int64_t configId, int64_t subscriptionId,
int64_t subscriptionRuleId, const vector<string>& cookies,
const StatsDimensionsValueParcel& dimensionsValueParcel));
};
+typedef StrictMock<BasicMockLogEventFilter> MockLogEventFilter;
+
+class MockStatsQueryCallback : public BnStatsQueryCallback {
+public:
+ MOCK_METHOD4(sendResults,
+ Status(const vector<string>& queryData, const vector<string>& columnNames,
+ const vector<int32_t>& columnTypes, int32_t rowCount));
+ MOCK_METHOD1(sendFailure, Status(const string& in_error));
+};
+
+class MockStatsSubscriptionCallback : public BnStatsSubscriptionCallback {
+public:
+ MOCK_METHOD(Status, onSubscriptionData,
+ (StatsSubscriptionCallbackReason in_reason,
+ const std::vector<uint8_t>& in_subscriptionPayload),
+ (override));
+};
+
+class StatsServiceConfigTest : public ::testing::Test {
+protected:
+ shared_ptr<StatsService> service;
+ const int kConfigKey = 789130123; // Randomly chosen
+ const int kCallingUid = 10100; // Randomly chosen
+
+ void SetUp() override {
+ service = createStatsService();
+ // Removing config file from data/misc/stats-service and data/misc/stats-data if present
+ ConfigKey configKey(kCallingUid, kConfigKey);
+ service->removeConfiguration(kConfigKey, kCallingUid);
+ service->mProcessor->onDumpReport(configKey, getElapsedRealtimeNs(),
+ false /* include_current_bucket*/, true /* erase_data */,
+ ADB_DUMP, NO_TIME_CONSTRAINTS, nullptr);
+ }
+
+ void TearDown() override {
+ // Cleaning up data/misc/stats-service and data/misc/stats-data
+ ConfigKey configKey(kCallingUid, kConfigKey);
+ service->removeConfiguration(kConfigKey, kCallingUid);
+ service->mProcessor->onDumpReport(configKey, getElapsedRealtimeNs(),
+ false /* include_current_bucket*/, true /* erase_data */,
+ ADB_DUMP, NO_TIME_CONSTRAINTS, nullptr);
+ }
+
+ virtual shared_ptr<StatsService> createStatsService() {
+ return SharedRefBase::make<StatsService>(new UidMap(), /* queue */ nullptr,
+ /* LogEventFilter */ nullptr);
+ }
+
+ bool sendConfig(const StatsdConfig& config);
+
+ ConfigMetricsReport getReports(sp<StatsLogProcessor> processor, int64_t timestamp,
+ bool include_current = false);
+};
+
+static void assertConditionTimer(const ConditionTimer& conditionTimer, bool condition,
+ int64_t timerNs, int64_t lastConditionTrueTimestampNs,
+ int64_t currentBucketStartDelayNs = 0) {
+ EXPECT_EQ(condition, conditionTimer.mCondition);
+ EXPECT_EQ(timerNs, conditionTimer.mTimerNs);
+ EXPECT_EQ(lastConditionTrueTimestampNs, conditionTimer.mLastConditionChangeTimestampNs);
+ EXPECT_EQ(currentBucketStartDelayNs, conditionTimer.mCurrentBucketStartDelayNs);
+}
+
// Converts a ProtoOutputStream to a StatsLogReport proto.
StatsLogReport outputStreamToProto(ProtoOutputStream* proto);
@@ -103,6 +184,9 @@
// Create AtomMatcher proto for a scheduled job is done.
AtomMatcher CreateFinishScheduledJobAtomMatcher();
+// Create AtomMatcher proto for a scheduled job being scheduled.
+AtomMatcher CreateScheduleScheduledJobAtomMatcher();
+
// Create AtomMatcher proto for screen brightness state changed.
AtomMatcher CreateScreenBrightnessChangedAtomMatcher();
@@ -268,7 +352,7 @@
ValueMetric createValueMetric(const string& name, const AtomMatcher& what, const int valueField,
const optional<int64_t>& condition, const vector<int64_t>& states);
-KllMetric createKllMetric(const string& name, const AtomMatcher& what, const int valueField,
+KllMetric createKllMetric(const string& name, const AtomMatcher& what, const int kllField,
const optional<int64_t>& condition);
Alert createAlert(const string& name, const int64_t metricId, const int buckets,
@@ -374,6 +458,12 @@
const vector<string>& attributionTags,
const string& jobName);
+// Create log event for when a scheduled job is scheduled.
+std::unique_ptr<LogEvent> CreateScheduleScheduledJobEvent(uint64_t timestampNs,
+ const vector<int>& attributionUids,
+ const vector<string>& attributionTags,
+ const string& jobName);
+
// Create log event when battery saver starts.
std::unique_ptr<LogEvent> CreateBatterySaverOnEvent(uint64_t timestampNs);
// Create log event when battery saver stops.
@@ -437,12 +527,23 @@
AppStartOccurred::TransitionType type, const string& activity_name,
const string& calling_pkg_name, const bool is_instant_app, int64_t activity_start_msec);
+std::unique_ptr<LogEvent> CreateBleScanResultReceivedEvent(uint64_t timestampNs,
+ const vector<int>& attributionUids,
+ const vector<string>& attributionTags,
+ const int numResults);
+
std::unique_ptr<LogEvent> CreateTestAtomReportedEventVariableRepeatedFields(
uint64_t timestampNs, const vector<int>& repeatedIntField,
const vector<int64_t>& repeatedLongField, const vector<float>& repeatedFloatField,
const vector<string>& repeatedStringField, const bool* repeatedBoolField,
const size_t repeatedBoolFieldLength, const vector<int>& repeatedEnumField);
+std::unique_ptr<LogEvent> CreateRestrictedLogEvent(int atomTag, int64_t timestampNs = 0);
+std::unique_ptr<LogEvent> CreateNonRestrictedLogEvent(int atomTag, int64_t timestampNs = 0);
+
+std::unique_ptr<LogEvent> CreatePhoneSignalStrengthChangedEvent(
+ int64_t timestampNs, ::telephony::SignalStrengthEnum state);
+
std::unique_ptr<LogEvent> CreateTestAtomReportedEvent(
uint64_t timestampNs, const vector<int>& attributionUids,
const vector<string>& attributionTags, const int intField, const long longField,
@@ -453,12 +554,19 @@
const bool* repeatedBoolField, const size_t repeatedBoolFieldLength,
const vector<int>& repeatedEnumField);
+void createStatsEvent(AStatsEvent* statsEvent, uint8_t typeId, uint32_t atomId);
+
+void fillStatsEventWithSampleValue(AStatsEvent* statsEvent, uint8_t typeId);
+
// Create a statsd log event processor upon the start time in seconds, config and key.
-sp<StatsLogProcessor> CreateStatsLogProcessor(const int64_t timeBaseNs, const int64_t currentTimeNs,
- const StatsdConfig& config, const ConfigKey& key,
- const shared_ptr<IPullAtomCallback>& puller = nullptr,
- const int32_t atomTag = 0 /*for puller only*/,
- const sp<UidMap> = new UidMap());
+sp<StatsLogProcessor> CreateStatsLogProcessor(
+ const int64_t timeBaseNs, const int64_t currentTimeNs, const StatsdConfig& config,
+ const ConfigKey& key, const shared_ptr<IPullAtomCallback>& puller = nullptr,
+ const int32_t atomTag = 0 /*for puller only*/, const sp<UidMap> = new UidMap(),
+ const shared_ptr<LogEventFilter>& logEventFilter = nullptr);
+
+LogEventFilter::AtomIdSet CreateAtomIdSetDefault();
+LogEventFilter::AtomIdSet CreateAtomIdSetFromConfig(const StatsdConfig& config);
// Util function to sort the log events by timestamp.
void sortLogEventsByTimestamp(std::vector<std::unique_ptr<LogEvent>> *events);
@@ -484,9 +592,9 @@
int atomId, int64_t value);
void ValidateCountBucket(const CountBucketInfo& countBucket, int64_t startTimeNs, int64_t endTimeNs,
- int64_t count);
+ int64_t count, int64_t conditionTrueNs = 0);
void ValidateDurationBucket(const DurationBucketInfo& bucket, int64_t startTimeNs,
- int64_t endTimeNs, int64_t durationNs);
+ int64_t endTimeNs, int64_t durationNs, int64_t conditionTrueNs = 0);
void ValidateGaugeBucketTimes(const GaugeBucketInfo& gaugeBucket, int64_t startTimeNs,
int64_t endTimeNs, vector<int64_t> eventTimesNs);
void ValidateValueBucket(const ValueBucketInfo& bucket, int64_t startTimeNs, int64_t endTimeNs,
@@ -531,9 +639,6 @@
if (data->has_dimensions_in_what()) {
backfillStringInDimension(str_map, data->mutable_dimensions_in_what());
}
- if (data->has_dimensions_in_condition()) {
- backfillStringInDimension(str_map, data->mutable_dimensions_in_condition());
- }
}
}
@@ -556,9 +661,7 @@
};
template <typename T>
-void backfillDimensionPath(const DimensionsValue& whatPath,
- const DimensionsValue& conditionPath,
- T* metricData) {
+void backfillDimensionPath(const DimensionsValue& whatPath, T* metricData) {
for (int i = 0; i < metricData->data_size(); ++i) {
auto data = metricData->mutable_data(i);
if (data->dimension_leaf_values_in_what_size() > 0) {
@@ -566,11 +669,6 @@
data->mutable_dimensions_in_what());
data->clear_dimension_leaf_values_in_what();
}
- if (data->dimension_leaf_values_in_condition_size() > 0) {
- backfillDimensionPath(conditionPath, data->dimension_leaf_values_in_condition(),
- data->mutable_dimensions_in_condition());
- data->clear_dimension_leaf_values_in_condition();
- }
}
}
@@ -654,6 +752,13 @@
}
}
+template <typename P>
+void outputStreamToProto(ProtoOutputStream* outputStream, P* proto) {
+ vector<uint8_t> bytes;
+ outputStream->serializeToVector(&bytes);
+ proto->ParseFromArray(bytes.data(), bytes.size());
+}
+
inline bool isAtLeastSFuncTrue() {
return true;
}
@@ -678,6 +783,37 @@
void writeBootFlag(const std::string& flagName, const std::string& flagValue);
+PackageInfoSnapshot getPackageInfoSnapshot(const sp<UidMap> uidMap);
+
+PackageInfo buildPackageInfo(const std::string& name, const int32_t uid, const int64_t version,
+ const std::string& versionString,
+ const std::optional<std::string> installer,
+ const std::vector<uint8_t>& certHash, const bool deleted,
+ const bool hashStrings, const optional<uint32_t> installerIndex);
+
+std::vector<PackageInfo> buildPackageInfos(
+ const std::vector<string>& names, const std::vector<int32_t>& uids,
+ const std::vector<int64_t>& versions, const std::vector<std::string>& versionStrings,
+ const std::vector<std::string>& installers,
+ const std::vector<std::vector<uint8_t>>& certHashes, const std::vector<bool>& deleted,
+ const std::vector<uint32_t>& installerIndices, const bool hashStrings);
+
+template <typename T>
+std::vector<T> concatenate(const vector<T>& a, const vector<T>& b) {
+ vector<T> result(a);
+ result.insert(result.end(), b.begin(), b.end());
+ return result;
+}
+
+StatsdStatsReport_PulledAtomStats getPulledAtomStats(int atom_id);
+
+template <typename P>
+std::vector<uint8_t> protoToBytes(const P& proto) {
+ const size_t byteSize = proto.ByteSizeLong();
+ vector<uint8_t> bytes(byteSize);
+ proto.SerializeToArray(bytes.data(), byteSize);
+ return bytes;
+}
} // namespace statsd
} // namespace os
} // namespace android
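A minimal usage sketch for the serialization helpers added above (not part of the patch itself); it assumes only this header plus the StatsdStatsReport proto it already pulls in via src/stats_log.pb.h:

    #include <gtest/gtest.h>

    #include "src/stats_log.pb.h"
    #include "tests/statsd_test_util.h"

    namespace android {
    namespace os {
    namespace statsd {

    // Sketch: protoToBytes() sizes the buffer with ByteSizeLong() and fills it with
    // SerializeToArray(), so a plain protobuf parse should round-trip the message.
    TEST(StatsdTestUtilSketch, ProtoBytesRoundTrip) {
        StatsdStatsReport report;  // populate fields as needed in a real test
        std::vector<uint8_t> bytes = protoToBytes(report);

        StatsdStatsReport parsed;
        ASSERT_TRUE(parsed.ParseFromArray(bytes.data(), bytes.size()));
        EXPECT_EQ(report.SerializeAsString(), parsed.SerializeAsString());
    }

    }  // namespace statsd
    }  // namespace os
    }  // namespace android

The templated outputStreamToProto() covers the opposite direction, filling a proto from a ProtoOutputStream that a metric producer has written into.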
diff --git a/statsd/tests/statsd_test_util_test.cpp b/statsd/tests/statsd_test_util_test.cpp
new file mode 100644
index 0000000..3db5ec3
--- /dev/null
+++ b/statsd/tests/statsd_test_util_test.cpp
@@ -0,0 +1,186 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "statsd_test_util.h"
+
+#include "gtest_matchers.h"
+#include "src/stats_log.pb.h"
+
+namespace android {
+namespace os {
+namespace statsd {
+
+#ifdef __ANDROID__
+
+namespace {
+const string appName = "app1";
+const int32_t uid = 1000;
+const int64_t version = 1;
+const string versionString = "v1";
+const string installer = "com.android.vending";
+} // anonymous namespace
+
+TEST(StatsdTestUtil_PackageInfo, TestBuildPackageInfo) {
+ PackageInfo packageInfo = buildPackageInfo(appName, uid, version, versionString,
+ /* installer */ nullopt, /* certHash */ {},
+ /* deleted */ false, /* hashStrings */ false,
+ /* installerIndex */ nullopt);
+
+ EXPECT_THAT(packageInfo.version(), Eq(version));
+ EXPECT_THAT(packageInfo.uid(), Eq(uid));
+ EXPECT_THAT(packageInfo.deleted(), Eq(false));
+}
+
+// Set up parameterized test for testing with different values for package certificate hashes.
+class StatsdTestUtil_PackageInfo_CertificateHash : public TestWithParam<vector<uint8_t>> {};
+INSTANTIATE_TEST_SUITE_P(
+ CertificateHashSizes, StatsdTestUtil_PackageInfo_CertificateHash,
+ Values(vector<uint8_t>(), vector<uint8_t>{'a'}, vector<uint8_t>{'a', 'b'}),
+ [](const TestParamInfo<StatsdTestUtil_PackageInfo_CertificateHash::ParamType>& info) {
+ if (info.param.empty()) {
+ return string("empty");
+ }
+ return string(info.param.begin(), info.param.end());
+ });
+TEST_P(StatsdTestUtil_PackageInfo_CertificateHash, TestBuildPackageInfoCertificateHash) {
+ const vector<uint8_t>& certHash = GetParam();
+ PackageInfo packageInfo = buildPackageInfo(appName, uid, version, versionString,
+ /* installer */ nullopt, certHash,
+ /* deleted */ false, /* hashStrings */ false,
+ /* installerIndex */ nullopt);
+
+ EXPECT_THAT(packageInfo.has_truncated_certificate_hash(), !certHash.empty());
+ string expectedCertHash(certHash.begin(), certHash.end());
+ EXPECT_THAT(packageInfo.truncated_certificate_hash(), Eq(expectedCertHash));
+}
+
+TEST(StatsdTestUtil_PackageInfo, TestBuildPackageInfoHashStrings) {
+ PackageInfo packageInfo = buildPackageInfo(appName, uid, version, versionString,
+ /* installer */ nullopt, /* certHash */ {},
+ /* deleted */ false, /* hashStrings */ true,
+ /* installerIndex */ nullopt);
+
+ EXPECT_TRUE(packageInfo.has_name_hash());
+ EXPECT_THAT(packageInfo.name_hash(), Eq(Hash64(appName)));
+ EXPECT_FALSE(packageInfo.has_name());
+
+ EXPECT_TRUE(packageInfo.has_version_string_hash());
+ EXPECT_THAT(packageInfo.version_string_hash(), Eq(Hash64(versionString)));
+ EXPECT_FALSE(packageInfo.has_version_string());
+}
+
+TEST(StatsdTestUtil_PackageInfo, TestBuildPackageInfoNoHashStrings) {
+ PackageInfo packageInfo = buildPackageInfo(appName, uid, version, versionString,
+ /* installer */ nullopt, /* certHash */ {},
+ /* deleted */ false, /* hashStrings */ false,
+ /* installerIndex */ nullopt);
+
+ EXPECT_TRUE(packageInfo.has_name());
+ EXPECT_THAT(packageInfo.name(), Eq(appName));
+ EXPECT_FALSE(packageInfo.has_name_hash());
+
+ EXPECT_TRUE(packageInfo.has_version_string());
+ EXPECT_THAT(packageInfo.version_string(), Eq(versionString));
+ EXPECT_FALSE(packageInfo.has_version_string_hash());
+}
+
+// Test with combinations of installerIndex (optional<uint32_t>) and hashStrings (bool) values.
+class StatsdTestUtil_PackageInfo_InstallerIndexAndHashStrings
+ : public TestWithParam<tuple<optional<uint32_t>, bool>> {};
+INSTANTIATE_TEST_SUITE_P(InstallerIndexAndHashStrings,
+ StatsdTestUtil_PackageInfo_InstallerIndexAndHashStrings,
+ Combine(Values(optional(2), nullopt), Bool()));
+TEST_P(StatsdTestUtil_PackageInfo_InstallerIndexAndHashStrings, TestBuildPackageInfoNoInstaller) {
+ const auto& [installerIndex, hashStrings] = GetParam();
+
+ PackageInfo packageInfo = buildPackageInfo(appName, uid, version, versionString,
+ /* installer */ nullopt, /* certHash */ {},
+ /* deleted */ false, hashStrings, installerIndex);
+
+ EXPECT_FALSE(packageInfo.has_installer_index());
+ EXPECT_FALSE(packageInfo.has_installer_hash());
+ EXPECT_FALSE(packageInfo.has_installer());
+}
+
+// Set up parameterized test for testing with different boolean values for hashStrings parameter
+// in buildPackageInfo()/buildPackageInfos()
+class StatsdTestUtil_PackageInfo_HashStrings : public TestWithParam<bool> {};
+INSTANTIATE_TEST_SUITE_P(HashStrings, StatsdTestUtil_PackageInfo_HashStrings, Bool(),
+ PrintToStringParamName());
+TEST_P(StatsdTestUtil_PackageInfo_HashStrings, TestBuildPackageInfoWithInstallerAndInstallerIndex) {
+ const bool hashStrings = GetParam();
+ PackageInfo packageInfo = buildPackageInfo(appName, uid, version, versionString, installer,
+ /* certHash */ {}, /* deleted */ false, hashStrings,
+ /* installerIndex */ 1);
+
+ EXPECT_THAT(packageInfo.installer_index(), Eq(1));
+ EXPECT_FALSE(packageInfo.has_installer_hash());
+ EXPECT_FALSE(packageInfo.has_installer());
+}
+
+TEST(StatsdTestUtil_PackageInfo, TestBuildPackageInfoWithInstallerNoInstallerIndexHashStrings) {
+ PackageInfo packageInfo =
+ buildPackageInfo(appName, uid, version, versionString, installer, /* certHash */ {},
+ /* deleted */ false, /* hashStrings */ true,
+ /* installerIndex */ nullopt);
+
+ EXPECT_FALSE(packageInfo.has_installer_index());
+ EXPECT_THAT(packageInfo.installer_hash(), Eq(Hash64(installer)));
+ EXPECT_FALSE(packageInfo.has_installer());
+}
+
+TEST(StatsdTestUtil_PackageInfo, TestBuildPackageInfoWithInstallerNoInstallerIndexNoHashStrings) {
+ PackageInfo packageInfo =
+ buildPackageInfo(appName, uid, version, versionString, installer, /* certHash */ {},
+ /* deleted */ false, /* hashStrings */ false,
+ /* installerIndex */ nullopt);
+
+ EXPECT_FALSE(packageInfo.has_installer_index());
+ EXPECT_THAT(packageInfo.installer(), Eq(installer));
+ EXPECT_FALSE(packageInfo.has_installer_hash());
+}
+
+TEST_P(StatsdTestUtil_PackageInfo_HashStrings, TestBuildPackageInfosEmptyOptionalParams) {
+ const bool hashStrings = GetParam();
+ vector<PackageInfo> packageInfos = buildPackageInfos(
+ {appName}, {uid}, {version}, {versionString}, /* installers */ {}, /* certHashes */ {},
+ /* deleted */ {false}, /* installerIndices */ {}, hashStrings);
+
+ PackageInfo packageInfo = buildPackageInfo(
+ appName, uid, version, versionString, /* installer */ nullopt, /* certHash */ {},
+ /* deleted */ false, hashStrings, /* installerIndex */ nullopt);
+
+ EXPECT_THAT(packageInfos, Pointwise(EqPackageInfo(), {packageInfo}));
+}
+
+TEST_P(StatsdTestUtil_PackageInfo_HashStrings, TestBuildPackageInfosNonEmptyOptionalParams) {
+ const bool hashStrings = GetParam();
+ vector<PackageInfo> packageInfos = buildPackageInfos(
+ {appName}, {uid}, {version}, {versionString}, {installer}, /* certHashes */ {{'a'}},
+ /* deleted */ {false}, /* installerIndices */ {3}, hashStrings);
+
+ PackageInfo packageInfo =
+ buildPackageInfo(appName, uid, version, versionString, installer, /* certHash */ {'a'},
+ /* deleted */ false, hashStrings, /* installerIndex */ 3);
+
+ EXPECT_THAT(packageInfos, Pointwise(EqPackageInfo(), {packageInfo}));
+}
+
+#else
+GTEST_LOG_(INFO) << "This test does nothing.\n";
+#endif
+
+} // namespace statsd
+} // namespace os
+} // namespace android
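For context on the parallel-vector API exercised above, a hedged fragment (values are illustrative, not from the patch) showing how buildPackageInfos() lines up its arguments when more than one package is supplied; it assumes the declarations from tests/statsd_test_util.h:

    // One entry per package in each vector; the optional vectors (installers,
    // certHashes, installerIndices) may be left empty, as the tests above do.
    std::vector<PackageInfo> infos = buildPackageInfos(
            /* names */ {"app1", "app2"}, /* uids */ {1000, 1001},
            /* versions */ {1, 2}, /* versionStrings */ {"v1", "v2"},
            /* installers */ {}, /* certHashes */ {},
            /* deleted */ {false, false}, /* installerIndices */ {},
            /* hashStrings */ false);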
diff --git a/statsd/tests/storage/StorageManager_test.cpp b/statsd/tests/storage/StorageManager_test.cpp
index 74eafbf..db80e77 100644
--- a/statsd/tests/storage/StorageManager_test.cpp
+++ b/statsd/tests/storage/StorageManager_test.cpp
@@ -12,11 +12,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include "src/storage/StorageManager.h"
+
#include <android-base/unique_fd.h>
+#include <android-modules-utils/sdk_level.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <stdio.h>
-#include "src/storage/StorageManager.h"
+
+#include "android-base/stringprintf.h"
+#include "stats_log_util.h"
+#include "tests/statsd_test_util.h"
+#include "utils/DbUtils.h"
#ifdef __ANDROID__
@@ -25,6 +32,7 @@
namespace statsd {
using namespace testing;
+using android::modules::sdklevel::IsAtLeastU;
using std::make_shared;
using std::shared_ptr;
using std::vector;
@@ -196,6 +204,120 @@
clearLocalHistoryTestFiles();
}
+TEST(StorageManagerTest, TrainInfoReadWrite32To64BitTest) {
+ InstallTrainInfo trainInfo;
+ trainInfo.trainVersionCode = 12345;
+ trainInfo.trainName = "This is a train name #)$(&&$";
+ trainInfo.status = 1;
+ const char* expIds = "test_ids";
+ trainInfo.experimentIds.assign(expIds, expIds + strlen(expIds));
+
+    // Write the train info by hand, forking the production write path to always use 32-bit length fields.
+ StorageManager::deleteSuffixedFiles(TRAIN_INFO_DIR, trainInfo.trainName.c_str());
+ std::string fileName = base::StringPrintf("%s/%ld_%s", TRAIN_INFO_DIR, (long)getWallClockSec(),
+ trainInfo.trainName.c_str());
+
+ int fd = open(fileName.c_str(), O_WRONLY | O_CREAT | O_CLOEXEC, S_IRUSR | S_IWUSR);
+ ASSERT_NE(fd, -1);
+
+ size_t result;
+ // Write the magic word
+ result = write(fd, &TRAIN_INFO_FILE_MAGIC, sizeof(TRAIN_INFO_FILE_MAGIC));
+ ASSERT_EQ(result, sizeof(TRAIN_INFO_FILE_MAGIC));
+
+ // Write the train version
+ const size_t trainVersionCodeByteCount = sizeof(trainInfo.trainVersionCode);
+ result = write(fd, &trainInfo.trainVersionCode, trainVersionCodeByteCount);
+ ASSERT_EQ(result, trainVersionCodeByteCount);
+
+ // Write # of bytes in trainName to file.
+ // NB: this is changed from size_t to int32_t for this test.
+ const int32_t trainNameSize = trainInfo.trainName.size();
+ const size_t trainNameSizeByteCount = sizeof(trainNameSize);
+ result = write(fd, (uint8_t*)&trainNameSize, trainNameSizeByteCount);
+ ASSERT_EQ(result, trainNameSizeByteCount);
+
+ // Write trainName to file
+ result = write(fd, trainInfo.trainName.c_str(), trainNameSize);
+ ASSERT_EQ(result, trainNameSize);
+
+ // Write status to file
+ const size_t statusByteCount = sizeof(trainInfo.status);
+ result = write(fd, (uint8_t*)&trainInfo.status, statusByteCount);
+ ASSERT_EQ(result, statusByteCount);
+
+ // Write experiment id count to file.
+ // NB: this is changed from size_t to int32_t for this test.
+ const int32_t experimentIdsCount = trainInfo.experimentIds.size();
+ const size_t experimentIdsCountByteCount = sizeof(experimentIdsCount);
+ result = write(fd, (uint8_t*)&experimentIdsCount, experimentIdsCountByteCount);
+ ASSERT_EQ(result, experimentIdsCountByteCount);
+
+ // Write experimentIds to file
+ for (size_t i = 0; i < experimentIdsCount; i++) {
+ const int64_t experimentId = trainInfo.experimentIds[i];
+ const size_t experimentIdByteCount = sizeof(experimentId);
+ result = write(fd, &experimentId, experimentIdByteCount);
+ ASSERT_EQ(result, experimentIdByteCount);
+ }
+
+ // Write bools to file
+ const size_t boolByteCount = sizeof(trainInfo.requiresStaging);
+ result = write(fd, (uint8_t*)&trainInfo.requiresStaging, boolByteCount);
+ ASSERT_EQ(result, boolByteCount);
+ result = write(fd, (uint8_t*)&trainInfo.rollbackEnabled, boolByteCount);
+ ASSERT_EQ(result, boolByteCount);
+ result = write(fd, (uint8_t*)&trainInfo.requiresLowLatencyMonitor, boolByteCount);
+ ASSERT_EQ(result, boolByteCount);
+ close(fd);
+
+ InstallTrainInfo trainInfoResult;
+ EXPECT_TRUE(StorageManager::readTrainInfo(trainInfo.trainName, trainInfoResult));
+
+ EXPECT_EQ(trainInfo.trainVersionCode, trainInfoResult.trainVersionCode);
+ ASSERT_EQ(trainInfo.trainName.size(), trainInfoResult.trainName.size());
+ EXPECT_EQ(trainInfo.trainName, trainInfoResult.trainName);
+ EXPECT_EQ(trainInfo.status, trainInfoResult.status);
+ ASSERT_EQ(trainInfo.experimentIds.size(), trainInfoResult.experimentIds.size());
+ EXPECT_EQ(trainInfo.experimentIds, trainInfoResult.experimentIds);
+}
+
+TEST(StorageManagerTest, DeleteUnmodifiedOldDbFiles) {
+ if (!IsAtLeastU()) {
+ GTEST_SKIP();
+ }
+ ConfigKey key(123, 12345);
+ unique_ptr<LogEvent> event = CreateRestrictedLogEvent(/*atomTag=*/10, /*timestampNs=*/1000);
+ dbutils::createTableIfNeeded(key, /*metricId=*/1, *event);
+ EXPECT_TRUE(StorageManager::hasFile(
+ base::StringPrintf("%s/%s", STATS_RESTRICTED_DATA_DIR, "123_12345.db").c_str()));
+
+ int64_t wallClockSec = getWallClockSec() + (StatsdStats::kMaxAgeSecond + 1);
+ StorageManager::enforceDbGuardrails(STATS_RESTRICTED_DATA_DIR, wallClockSec,
+ /*maxBytes=*/INT_MAX);
+
+ EXPECT_FALSE(StorageManager::hasFile(
+ base::StringPrintf("%s/%s", STATS_RESTRICTED_DATA_DIR, "123_12345.db").c_str()));
+}
+
+TEST(StorageManagerTest, DeleteLargeDbFiles) {
+ if (!IsAtLeastU()) {
+ GTEST_SKIP();
+ }
+ ConfigKey key(123, 12345);
+ unique_ptr<LogEvent> event = CreateRestrictedLogEvent(/*atomTag=*/10, /*timestampNs=*/1000);
+ dbutils::createTableIfNeeded(key, /*metricId=*/1, *event);
+ EXPECT_TRUE(StorageManager::hasFile(
+ base::StringPrintf("%s/%s", STATS_RESTRICTED_DATA_DIR, "123_12345.db").c_str()));
+
+ StorageManager::enforceDbGuardrails(STATS_RESTRICTED_DATA_DIR,
+ /*wallClockSec=*/getWallClockSec(),
+ /*maxBytes=*/0);
+
+ EXPECT_FALSE(StorageManager::hasFile(
+ base::StringPrintf("%s/%s", STATS_RESTRICTED_DATA_DIR, "123_12345.db").c_str()));
+}
+
} // namespace statsd
} // namespace os
} // namespace android
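A short aside on TrainInfoReadWrite32To64BitTest above (an inference from the test, not copied from StorageManager.cpp): the hand-written file uses int32_t length fields to mimic what a 32-bit statsd wrote, so the 64-bit reader has to consume 4-byte sizes rather than sizeof(size_t):

    // Hypothetical reader fragment for the length fields: read a fixed 4-byte value
    // regardless of the host's size_t width; consuming 8 bytes here would shift every
    // field that follows and corrupt the parsed train info.
    int32_t trainNameSize = 0;
    read(fd, &trainNameSize, sizeof(trainNameSize));  // 4 bytes, never sizeof(size_t)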
diff --git a/statsd/tests/utils/DbUtils_test.cpp b/statsd/tests/utils/DbUtils_test.cpp
new file mode 100644
index 0000000..8a9b337
--- /dev/null
+++ b/statsd/tests/utils/DbUtils_test.cpp
@@ -0,0 +1,435 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "utils/DbUtils.h"
+
+#include <android-modules-utils/sdk_level.h>
+#include <gtest/gtest.h>
+
+#include "android-base/stringprintf.h"
+#include "storage/StorageManager.h"
+#include "tests/statsd_test_util.h"
+
+#ifdef __ANDROID__
+
+using namespace std;
+
+namespace android {
+namespace os {
+namespace statsd {
+namespace dbutils {
+
+using android::modules::sdklevel::IsAtLeastU;
+using base::StringPrintf;
+
+namespace {
+const ConfigKey key = ConfigKey(111, 222);
+const int64_t metricId = 111;
+const int32_t tagId = 1;
+
+AStatsEvent* makeAStatsEvent(int32_t atomId, int64_t timestampNs) {
+ AStatsEvent* statsEvent = AStatsEvent_obtain();
+ AStatsEvent_setAtomId(statsEvent, atomId);
+ AStatsEvent_overwriteTimestamp(statsEvent, timestampNs);
+ return statsEvent;
+}
+
+LogEvent makeLogEvent(AStatsEvent* statsEvent) {
+ LogEvent event(/*uid=*/0, /*pid=*/0);
+ parseStatsEventToLogEvent(statsEvent, &event);
+ return event;
+}
+
+class DbUtilsTest : public ::testing::Test {
+public:
+ void SetUp() override {
+ if (!IsAtLeastU()) {
+ GTEST_SKIP();
+ }
+ }
+ void TearDown() override {
+ if (!IsAtLeastU()) {
+ GTEST_SKIP();
+ }
+ deleteDb(key);
+ }
+};
+} // Anonymous namespace.
+
+TEST_F(DbUtilsTest, TestInsertString) {
+ int64_t eventElapsedTimeNs = 10000000000;
+
+ AStatsEvent* statsEvent = makeAStatsEvent(tagId, eventElapsedTimeNs + 10);
+ AStatsEvent_writeString(statsEvent, "test_string");
+ LogEvent logEvent = makeLogEvent(statsEvent);
+ vector<LogEvent> events{logEvent};
+
+ EXPECT_TRUE(createTableIfNeeded(key, metricId, logEvent));
+ string err;
+ EXPECT_TRUE(insert(key, metricId, events, err));
+
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ string zSql = "SELECT * FROM metric_111 ORDER BY elapsedTimestampNs";
+ EXPECT_TRUE(query(key, zSql, rows, columnTypes, columnNames, err));
+
+ ASSERT_EQ(rows.size(), 1);
+ EXPECT_THAT(rows[0], ElementsAre("1", to_string(eventElapsedTimeNs + 10), _, "test_string"));
+ EXPECT_THAT(columnTypes,
+ ElementsAre(SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_TEXT));
+ EXPECT_THAT(columnNames,
+ ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs", "field_1"));
+}
+
+TEST_F(DbUtilsTest, TestMaliciousString) {
+ int64_t eventElapsedTimeNs = 10000000000;
+
+ AStatsEvent* statsEvent = makeAStatsEvent(tagId, eventElapsedTimeNs + 10);
+ AStatsEvent_writeString(statsEvent, "111); DROP TABLE metric_111;--");
+ LogEvent logEvent = makeLogEvent(statsEvent);
+ vector<LogEvent> events{logEvent};
+
+ EXPECT_TRUE(createTableIfNeeded(key, metricId, logEvent));
+ string err;
+ EXPECT_TRUE(insert(key, metricId, events, err));
+
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ string zSql = "SELECT * FROM metric_111 ORDER BY elapsedTimestampNs";
+ EXPECT_TRUE(query(key, zSql, rows, columnTypes, columnNames, err));
+
+ ASSERT_EQ(rows.size(), 1);
+ EXPECT_THAT(rows[0], ElementsAre("1", to_string(eventElapsedTimeNs + 10), _,
+ "111); DROP TABLE metric_111;--"));
+ EXPECT_THAT(columnTypes,
+ ElementsAre(SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_TEXT));
+ EXPECT_THAT(columnNames,
+ ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs", "field_1"));
+}
+
+TEST_F(DbUtilsTest, TestInsertStringNegativeMetricId) {
+ int64_t eventElapsedTimeNs = 10000000000;
+ int64_t metricId2 = -111;
+
+ AStatsEvent* statsEvent = makeAStatsEvent(tagId, eventElapsedTimeNs + 10);
+ AStatsEvent_writeString(statsEvent, "111");
+ LogEvent logEvent = makeLogEvent(statsEvent);
+ vector<LogEvent> events{logEvent};
+
+ EXPECT_TRUE(createTableIfNeeded(key, metricId2, logEvent));
+ string err;
+ EXPECT_TRUE(insert(key, metricId2, events, err));
+
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ string zSql = "SELECT * FROM metric_n111 ORDER BY elapsedTimestampNs";
+ EXPECT_TRUE(query(key, zSql, rows, columnTypes, columnNames, err));
+
+ ASSERT_EQ(rows.size(), 1);
+ EXPECT_THAT(rows[0], ElementsAre("1", to_string(eventElapsedTimeNs + 10), _, "111"));
+ EXPECT_THAT(columnTypes,
+ ElementsAre(SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_TEXT));
+ EXPECT_THAT(columnNames,
+ ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs", "field_1"));
+}
+
+TEST_F(DbUtilsTest, TestInsertInteger) {
+ int64_t eventElapsedTimeNs = 10000000000;
+
+ AStatsEvent* statsEvent = makeAStatsEvent(tagId, eventElapsedTimeNs + 10);
+ AStatsEvent_writeInt32(statsEvent, 11);
+ AStatsEvent_writeInt64(statsEvent, 111);
+ LogEvent logEvent = makeLogEvent(statsEvent);
+ vector<LogEvent> events{logEvent};
+
+ EXPECT_TRUE(createTableIfNeeded(key, metricId, logEvent));
+ string err;
+ EXPECT_TRUE(insert(key, metricId, events, err));
+
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ string zSql = "SELECT * FROM metric_111 ORDER BY elapsedTimestampNs";
+ EXPECT_TRUE(query(key, zSql, rows, columnTypes, columnNames, err));
+
+ ASSERT_EQ(rows.size(), 1);
+ EXPECT_THAT(rows[0], ElementsAre("1", to_string(eventElapsedTimeNs + 10), _, "11", "111"));
+ EXPECT_THAT(columnTypes, ElementsAre(SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER,
+ SQLITE_INTEGER, SQLITE_INTEGER));
+ EXPECT_THAT(columnNames, ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs",
+ "field_1", "field_2"));
+}
+
+TEST_F(DbUtilsTest, TestInsertFloat) {
+ int64_t eventElapsedTimeNs = 10000000000;
+
+ AStatsEvent* statsEvent = makeAStatsEvent(tagId, eventElapsedTimeNs + 10);
+ AStatsEvent_writeFloat(statsEvent, 11.0);
+ LogEvent logEvent = makeLogEvent(statsEvent);
+ vector<LogEvent> events{logEvent};
+
+ EXPECT_TRUE(createTableIfNeeded(key, metricId, logEvent));
+ string err;
+ EXPECT_TRUE(insert(key, metricId, events, err));
+
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ string zSql = "SELECT * FROM metric_111 ORDER BY elapsedTimestampNs";
+ EXPECT_TRUE(query(key, zSql, rows, columnTypes, columnNames, err));
+
+ ASSERT_EQ(rows.size(), 1);
+ EXPECT_THAT(rows[0], ElementsAre("1", to_string(eventElapsedTimeNs + 10), _, _));
+ EXPECT_FLOAT_EQ(/*field1=*/std::stof(rows[0][3]), 11.0);
+ EXPECT_THAT(columnTypes,
+ ElementsAre(SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_FLOAT));
+ EXPECT_THAT(columnNames,
+ ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs", "field_1"));
+}
+
+TEST_F(DbUtilsTest, TestInsertTwoEvents) {
+ int64_t eventElapsedTimeNs = 10000000000;
+
+ AStatsEvent* statsEvent1 = makeAStatsEvent(tagId, eventElapsedTimeNs + 10);
+ AStatsEvent_writeString(statsEvent1, "111");
+ LogEvent logEvent1 = makeLogEvent(statsEvent1);
+
+ AStatsEvent* statsEvent2 = makeAStatsEvent(tagId, eventElapsedTimeNs + 20);
+ AStatsEvent_writeString(statsEvent2, "222");
+ LogEvent logEvent2 = makeLogEvent(statsEvent2);
+
+ vector<LogEvent> events{logEvent1, logEvent2};
+
+ EXPECT_TRUE(createTableIfNeeded(key, metricId, logEvent1));
+ string err;
+ EXPECT_TRUE(insert(key, metricId, events, err));
+
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ string zSql = "SELECT * FROM metric_111 ORDER BY elapsedTimestampNs";
+ EXPECT_TRUE(query(key, zSql, rows, columnTypes, columnNames, err));
+
+ ASSERT_EQ(rows.size(), 2);
+ EXPECT_THAT(rows[0], ElementsAre("1", to_string(eventElapsedTimeNs + 10), _, "111"));
+ EXPECT_THAT(rows[1], ElementsAre("1", to_string(eventElapsedTimeNs + 20), _, "222"));
+ EXPECT_THAT(columnTypes,
+ ElementsAre(SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_TEXT));
+ EXPECT_THAT(columnNames,
+ ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs", "field_1"));
+}
+
+TEST_F(DbUtilsTest, TestInsertTwoEventsEnforceTtl) {
+ int64_t eventElapsedTimeNs = 10000000000;
+ int64_t eventWallClockNs = 50000000000;
+
+ AStatsEvent* statsEvent1 = makeAStatsEvent(tagId, eventElapsedTimeNs + 10);
+ AStatsEvent_writeString(statsEvent1, "111");
+ LogEvent logEvent1 = makeLogEvent(statsEvent1);
+ logEvent1.setLogdWallClockTimestampNs(eventWallClockNs);
+
+ AStatsEvent* statsEvent2 = makeAStatsEvent(tagId, eventElapsedTimeNs + 20);
+ AStatsEvent_writeString(statsEvent2, "222");
+ LogEvent logEvent2 = makeLogEvent(statsEvent2);
+ logEvent2.setLogdWallClockTimestampNs(eventWallClockNs + eventElapsedTimeNs);
+
+ vector<LogEvent> events{logEvent1, logEvent2};
+
+ EXPECT_TRUE(createTableIfNeeded(key, metricId, logEvent1));
+ sqlite3* db = getDb(key);
+ string err;
+ EXPECT_TRUE(insert(db, metricId, events, err));
+ EXPECT_TRUE(flushTtl(db, metricId, eventWallClockNs));
+ closeDb(db);
+
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ string zSql = "SELECT * FROM metric_111 ORDER BY elapsedTimestampNs";
+ EXPECT_TRUE(query(key, zSql, rows, columnTypes, columnNames, err));
+
+ ASSERT_EQ(rows.size(), 1);
+ EXPECT_THAT(rows[0], ElementsAre("1", to_string(eventElapsedTimeNs + 20), _, "222"));
+ EXPECT_THAT(columnTypes,
+ ElementsAre(SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_TEXT));
+ EXPECT_THAT(columnNames,
+ ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs", "field_1"));
+}
+
+TEST_F(DbUtilsTest, TestMaliciousQuery) {
+ int64_t eventElapsedTimeNs = 10000000000;
+
+ AStatsEvent* statsEvent = makeAStatsEvent(tagId, eventElapsedTimeNs + 10);
+ AStatsEvent_writeString(statsEvent, "111");
+ LogEvent logEvent = makeLogEvent(statsEvent);
+ vector<LogEvent> events{logEvent};
+
+ EXPECT_TRUE(createTableIfNeeded(key, metricId, logEvent));
+ string err;
+ EXPECT_TRUE(insert(key, metricId, events, err));
+
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ string zSql = "DROP TABLE metric_111";
+ EXPECT_FALSE(query(key, zSql, rows, columnTypes, columnNames, err));
+ EXPECT_THAT(err, StartsWith("attempt to write a readonly database"));
+}
+
+TEST_F(DbUtilsTest, TestInsertStringIntegrityCheckPasses) {
+ int64_t eventElapsedTimeNs = 10000000000;
+
+ AStatsEvent* statsEvent = makeAStatsEvent(tagId, eventElapsedTimeNs + 10);
+ AStatsEvent_writeString(statsEvent, "111");
+ LogEvent logEvent = makeLogEvent(statsEvent);
+ vector<LogEvent> events{logEvent};
+
+ EXPECT_TRUE(createTableIfNeeded(key, metricId, logEvent));
+ string err;
+ EXPECT_TRUE(insert(key, metricId, events, err));
+ verifyIntegrityAndDeleteIfNecessary(key);
+
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ string zSql = "SELECT * FROM metric_111 ORDER BY elapsedTimestampNs";
+ EXPECT_TRUE(query(key, zSql, rows, columnTypes, columnNames, err));
+ ASSERT_EQ(rows.size(), 1);
+ EXPECT_THAT(rows[0], ElementsAre("1", to_string(eventElapsedTimeNs + 10), _, "111"));
+ EXPECT_THAT(columnTypes,
+ ElementsAre(SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_INTEGER, SQLITE_TEXT));
+ EXPECT_THAT(columnNames,
+ ElementsAre("atomId", "elapsedTimestampNs", "wallTimestampNs", "field_1"));
+}
+
+TEST_F(DbUtilsTest, TestInsertStringIntegrityCheckFails) {
+ int64_t eventElapsedTimeNs = 10000000000;
+
+ AStatsEvent* statsEvent = makeAStatsEvent(tagId, eventElapsedTimeNs + 10);
+ AStatsEvent_writeString(statsEvent, "111");
+ LogEvent logEvent = makeLogEvent(statsEvent);
+ vector<LogEvent> events{logEvent};
+ EXPECT_TRUE(createTableIfNeeded(key, metricId, logEvent));
+ string err;
+ EXPECT_TRUE(insert(key, metricId, events, err));
+
+ vector<string> randomData{"1232hasha14125ashfas21512sh31321"};
+ string fileName = StringPrintf("%s/%d_%lld.db", STATS_RESTRICTED_DATA_DIR, key.GetUid(),
+ (long long)key.GetId());
+ StorageManager::writeFile(fileName.c_str(), randomData.data(), randomData.size());
+ EXPECT_TRUE(StorageManager::hasFile(fileName.c_str()));
+ verifyIntegrityAndDeleteIfNecessary(key);
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ string zSql = "SELECT * FROM metric_111 ORDER BY elapsedTimestampNs";
+ EXPECT_FALSE(query(key, zSql, rows, columnTypes, columnNames, err));
+ EXPECT_THAT(err, StartsWith("unable to open database file"));
+ EXPECT_FALSE(StorageManager::hasFile(fileName.c_str()));
+}
+
+TEST_F(DbUtilsTest, TestEventCompatibilityEventMatchesTable) {
+ AStatsEvent* statsEvent = makeAStatsEvent(tagId, /*eventElapsedTime=*/10000000000);
+ AStatsEvent_writeString(statsEvent, "111");
+ AStatsEvent_writeFloat(statsEvent, 111.0);
+ AStatsEvent_writeInt32(statsEvent, 23);
+ LogEvent logEvent = makeLogEvent(statsEvent);
+
+ EXPECT_TRUE(createTableIfNeeded(key, metricId, logEvent));
+
+ EXPECT_TRUE(isEventCompatible(key, metricId, logEvent));
+}
+
+TEST_F(DbUtilsTest, TestEventCompatibilityEventDoesNotMatchesTable) {
+ AStatsEvent* statsEvent = makeAStatsEvent(tagId, /*eventElapsedTime=*/10000000000);
+ AStatsEvent_writeString(statsEvent, "111");
+ AStatsEvent_writeFloat(statsEvent, 111.0);
+ AStatsEvent_writeInt32(statsEvent, 23);
+ LogEvent logEvent = makeLogEvent(statsEvent);
+
+ AStatsEvent* statsEvent2 = makeAStatsEvent(tagId, /*eventElapsedTime=*/10000000000);
+ AStatsEvent_writeString(statsEvent2, "111");
+ AStatsEvent_writeFloat(statsEvent2, 111.0);
+ AStatsEvent_writeInt32(statsEvent2, 23);
+ AStatsEvent_writeInt32(statsEvent2, 25);
+ LogEvent logEvent2 = makeLogEvent(statsEvent2);
+
+ EXPECT_TRUE(createTableIfNeeded(key, metricId, logEvent));
+
+ EXPECT_FALSE(isEventCompatible(key, metricId, logEvent2));
+}
+
+TEST_F(DbUtilsTest, TestEventCompatibilityTableNotCreated) {
+ AStatsEvent* statsEvent = makeAStatsEvent(tagId, /*eventElapsedTime=*/10000000000);
+ AStatsEvent_writeString(statsEvent, "111");
+ AStatsEvent_writeFloat(statsEvent, 111.0);
+ AStatsEvent_writeInt32(statsEvent, 23);
+ LogEvent logEvent = makeLogEvent(statsEvent);
+
+ EXPECT_TRUE(isEventCompatible(key, metricId, logEvent));
+}
+
+TEST_F(DbUtilsTest, TestUpdateDeviceInfoTable) {
+ string err;
+ updateDeviceInfoTable(key, err);
+
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ string zSql = "SELECT * FROM device_info";
+ EXPECT_TRUE(query(key, zSql, rows, columnTypes, columnNames, err));
+
+ ASSERT_EQ(rows.size(), 1);
+ EXPECT_THAT(rows[0], ElementsAre(_, _, _, _, _, _, _, _, _, _));
+ EXPECT_THAT(columnTypes,
+ ElementsAre(SQLITE_INTEGER, SQLITE_TEXT, SQLITE_TEXT, SQLITE_TEXT, SQLITE_TEXT,
+ SQLITE_TEXT, SQLITE_TEXT, SQLITE_TEXT, SQLITE_TEXT, SQLITE_TEXT));
+ EXPECT_THAT(columnNames,
+ ElementsAre("sdkVersion", "model", "product", "hardware", "device", "osBuild",
+ "fingerprint", "brand", "manufacturer", "board"));
+}
+
+TEST_F(DbUtilsTest, TestUpdateDeviceInfoTableInvokeTwice) {
+ string err;
+ updateDeviceInfoTable(key, err);
+ updateDeviceInfoTable(key, err);
+
+ std::vector<int32_t> columnTypes;
+ std::vector<string> columnNames;
+ std::vector<std::vector<std::string>> rows;
+ string zSql = "SELECT * FROM device_info";
+ EXPECT_TRUE(query(key, zSql, rows, columnTypes, columnNames, err));
+
+ ASSERT_EQ(rows.size(), 1);
+ EXPECT_THAT(rows[0], ElementsAre(_, _, _, _, _, _, _, _, _, _));
+ EXPECT_THAT(columnTypes,
+ ElementsAre(SQLITE_INTEGER, SQLITE_TEXT, SQLITE_TEXT, SQLITE_TEXT, SQLITE_TEXT,
+ SQLITE_TEXT, SQLITE_TEXT, SQLITE_TEXT, SQLITE_TEXT, SQLITE_TEXT));
+ EXPECT_THAT(columnNames,
+ ElementsAre("sdkVersion", "model", "product", "hardware", "device", "osBuild",
+ "fingerprint", "brand", "manufacturer", "board"));
+}
+
+} // namespace dbutils
+} // namespace statsd
+} // namespace os
+} // namespace android
+#else
+GTEST_LOG_(INFO) << "This test does nothing.\n";
+#endif
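Reading the column assertions in the tests above, the metric_<id> tables created by createTableIfNeeded() appear to have the shape sketched below; the DDL is inferred from the test expectations rather than copied from DbUtils.cpp, so treat it as approximate:

    // Three fixed columns, then one field_N column per top-level atom field; the
    // field affinity follows the logged type (INTEGER, FLOAT, or TEXT). Negative
    // metric ids are encoded with an 'n' prefix, e.g. metric_n111.
    const std::string kSketchedDdl =
            "CREATE TABLE metric_111(atomId INTEGER, elapsedTimestampNs INTEGER, "
            "wallTimestampNs INTEGER, field_1 TEXT)";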
diff --git a/statsd/tools/localtools/Android.bp b/statsd/tools/localtools/Android.bp
index 7557c78..d452402 100644
--- a/statsd/tools/localtools/Android.bp
+++ b/statsd/tools/localtools/Android.bp
@@ -7,6 +7,7 @@
manifest: "localdrive_manifest.txt",
srcs: [
"src/com/android/statsd/shelltools/localdrive/*.java",
+ "src/com/android/statsd/shelltools/CustomExtensionRegistry.java",
"src/com/android/statsd/shelltools/Utils.java",
],
static_libs: [
@@ -19,6 +20,7 @@
name: "statsd_testdrive_lib",
srcs: [
"src/com/android/statsd/shelltools/testdrive/*.java",
+ "src/com/android/statsd/shelltools/CustomExtensionRegistry.java",
"src/com/android/statsd/shelltools/Utils.java",
],
static_libs: [
@@ -38,6 +40,10 @@
java_test_host {
name: "statsd_testdrive_test",
+ // tag this module as a test artifact
+ test_suites: [
+ "general-tests",
+ ],
srcs: ["test/com/android/statsd/shelltools/testdrive/*.java"],
static_libs: [
"statsd_testdrive_lib",
@@ -45,4 +51,10 @@
"platformprotos",
"guava",
],
+ data: [
+ "test/data/**/*.*",
+ ],
+ data_native_bins: [
+ "aprotoc",
+ ],
}
diff --git a/statsd/tools/localtools/TEST_MAPPING b/statsd/tools/localtools/TEST_MAPPING
new file mode 100644
index 0000000..866c77e
--- /dev/null
+++ b/statsd/tools/localtools/TEST_MAPPING
@@ -0,0 +1,24 @@
+{
+ "presubmit": [
+ {
+ "name": "statsd_testdrive_test",
+ "options": [
+ {
+ "exclude-annotation": "org.junit.Ignore"
+ },
+ {
+ // Exclude test methods that require aprotoc host dependencies - see http://b/118691442
+ "exclude-filter": "com.android.statsd.shelltools.testdrive.ConfigurationTest.testOnePushedFromExternalAtomsProto"
+ },
+ {
+ // Exclude test methods that require aprotoc host dependencies - see http://b/118691442
+ "exclude-filter": "com.android.statsd.shelltools.testdrive.ConfigurationTest.testOnePushedFromExternalAtomsProtoWithImportedAtom"
+ },
+ {
+ // Exclude test methods that require aprotoc host dependencies - see http://b/118691442
+ "exclude-filter": "com.android.statsd.shelltools.testdrive.ConfigurationTest.testOnePushedFromExternalAtomsProtoWithImportedEnum"
+ }
+ ]
+ }
+ ]
+}
diff --git a/statsd/tools/localtools/src/com/android/statsd/shelltools/CustomExtensionRegistry.java b/statsd/tools/localtools/src/com/android/statsd/shelltools/CustomExtensionRegistry.java
new file mode 100644
index 0000000..def616c
--- /dev/null
+++ b/statsd/tools/localtools/src/com/android/statsd/shelltools/CustomExtensionRegistry.java
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.statsd.shelltools;
+
+import com.android.internal.os.ExperimentIdsProto;
+import com.android.internal.os.UidDataProto;
+import com.android.os.ActiveConfigProto;
+import com.android.os.ShellConfig;
+import com.android.os.adservices.AdservicesExtensionAtoms;
+import com.android.os.automotive.caruilib.AutomotiveCaruilibAtoms;
+import com.android.os.devicelogs.DeviceLogsAtoms;
+import com.android.os.dnd.DndAtoms;
+import com.android.os.dnd.DndExtensionAtoms;
+import com.android.os.expresslog.ExpresslogExtensionAtoms;
+import com.android.os.framework.FrameworkExtensionAtoms;
+import com.android.os.gps.GpsAtoms;
+import com.android.os.grammaticalinflection.GrammaticalInflection;
+import com.android.os.hardware.biometrics.BiometricsAtoms;
+import com.android.os.healthfitness.api.ApiExtensionAtoms;
+import com.android.os.healthfitness.ui.UiExtensionAtoms;
+import com.android.os.hotword.HotwordAtoms;
+import com.android.os.kernel.KernelAtoms;
+import com.android.os.locale.LocaleAtoms;
+import com.android.os.location.LocationAtoms;
+import com.android.os.location.LocationExtensionAtoms;
+import com.android.os.media.MediaDrmAtoms;
+import com.android.os.memorysafety.MemorysafetyExtensionAtoms;
+import com.android.os.permissioncontroller.PermissioncontrollerExtensionAtoms;
+import com.android.os.providers.mediaprovider.MediaProviderAtoms;
+import com.android.os.settings.SettingsExtensionAtoms;
+import com.android.os.statsd.ShellDataProto;
+import com.android.os.sysui.SysuiAtoms;
+import com.android.os.telecom.TelecomExtensionAtom;
+import com.android.os.telephony.SatelliteExtensionAtoms;
+import com.android.os.telephony.TelephonyExtensionAtoms;
+import com.android.os.telephony.qns.QnsExtensionAtoms;
+import com.android.os.usb.UsbAtoms;
+import com.android.os.uwb.UwbExtensionAtoms;
+import com.android.os.view.inputmethod.InputmethodAtoms;
+import com.android.os.wear.media.WearMediaAtoms;
+import com.android.os.wear.media.WearMediaExtensionAtoms;
+import com.android.os.wearpas.WearpasExtensionAtoms;
+import com.android.os.wearservices.WearservicesAtoms;
+import com.android.os.wearservices.WearservicesExtensionAtoms;
+import com.android.os.wearsysui.WearsysuiAtoms;
+import com.android.os.wifi.WifiExtensionAtoms;
+import android.os.statsd.media.MediaCodecExtensionAtoms;
+import com.android.os.credentials.CredentialsExtensionAtoms;
+
+import com.google.protobuf.ExtensionRegistry;
+
+/**
+ * CustomExtensionRegistry for local use of statsd.
+ */
+public class CustomExtensionRegistry {
+
+ public static ExtensionRegistry REGISTRY;
+
+ static {
+ /** In Java, when parsing a message containing extensions, you must provide an
+ * ExtensionRegistry which contains definitions of all of the extensions which you
+ * want the parser to recognize. This is necessary because Java's bytecode loading
+ * semantics do not provide any way for the protocol buffers library to automatically
+ * discover all extensions defined in your binary.
+ *
+ * See http://sites/protocol-buffers/user-docs/miscellaneous-howtos/extensions
+ * #Java_ExtensionRegistry_
+ */
+ REGISTRY = ExtensionRegistry.newInstance();
+ registerAllExtensions(REGISTRY);
+ REGISTRY = REGISTRY.getUnmodifiable();
+ }
+
+ /**
+ * Registers all proto2 extensions.
+ */
+ private static void registerAllExtensions(ExtensionRegistry extensionRegistry) {
+ ExperimentIdsProto.registerAllExtensions(extensionRegistry);
+ UidDataProto.registerAllExtensions(extensionRegistry);
+ ActiveConfigProto.registerAllExtensions(extensionRegistry);
+ ShellConfig.registerAllExtensions(extensionRegistry);
+ AdservicesExtensionAtoms.registerAllExtensions(extensionRegistry);
+ AutomotiveCaruilibAtoms.registerAllExtensions(extensionRegistry);
+ DeviceLogsAtoms.registerAllExtensions(extensionRegistry);
+ DndAtoms.registerAllExtensions(extensionRegistry);
+ DndExtensionAtoms.registerAllExtensions(extensionRegistry);
+ ExpresslogExtensionAtoms.registerAllExtensions(extensionRegistry);
+ FrameworkExtensionAtoms.registerAllExtensions(extensionRegistry);
+ GpsAtoms.registerAllExtensions(extensionRegistry);
+ GrammaticalInflection.registerAllExtensions(extensionRegistry);
+ BiometricsAtoms.registerAllExtensions(extensionRegistry);
+ ApiExtensionAtoms.registerAllExtensions(extensionRegistry);
+ UiExtensionAtoms.registerAllExtensions(extensionRegistry);
+ HotwordAtoms.registerAllExtensions(extensionRegistry);
+ KernelAtoms.registerAllExtensions(extensionRegistry);
+ LocaleAtoms.registerAllExtensions(extensionRegistry);
+ LocationAtoms.registerAllExtensions(extensionRegistry);
+ LocationExtensionAtoms.registerAllExtensions(extensionRegistry);
+ MediaDrmAtoms.registerAllExtensions(extensionRegistry);
+ MemorysafetyExtensionAtoms.registerAllExtensions(extensionRegistry);
+ PermissioncontrollerExtensionAtoms.registerAllExtensions(extensionRegistry);
+ MediaProviderAtoms.registerAllExtensions(extensionRegistry);
+ SettingsExtensionAtoms.registerAllExtensions(extensionRegistry);
+ ShellDataProto.registerAllExtensions(extensionRegistry);
+ SysuiAtoms.registerAllExtensions(extensionRegistry);
+ TelecomExtensionAtom.registerAllExtensions(extensionRegistry);
+ SatelliteExtensionAtoms.registerAllExtensions(extensionRegistry);
+ TelephonyExtensionAtoms.registerAllExtensions(extensionRegistry);
+ QnsExtensionAtoms.registerAllExtensions(extensionRegistry);
+ UsbAtoms.registerAllExtensions(extensionRegistry);
+ UwbExtensionAtoms.registerAllExtensions(extensionRegistry);
+ InputmethodAtoms.registerAllExtensions(extensionRegistry);
+ WearMediaAtoms.registerAllExtensions(extensionRegistry);
+ WearMediaExtensionAtoms.registerAllExtensions(extensionRegistry);
+ WearpasExtensionAtoms.registerAllExtensions(extensionRegistry);
+ WearservicesAtoms.registerAllExtensions(extensionRegistry);
+ WearservicesExtensionAtoms.registerAllExtensions(extensionRegistry);
+ WearsysuiAtoms.registerAllExtensions(extensionRegistry);
+ WifiExtensionAtoms.registerAllExtensions(extensionRegistry);
+ MediaCodecExtensionAtoms.registerAllExtensions(extensionRegistry);
+ CredentialsExtensionAtoms.registerAllExtensions(extensionRegistry);
+ }
+}
diff --git a/statsd/tools/localtools/src/com/android/statsd/shelltools/Utils.java b/statsd/tools/localtools/src/com/android/statsd/shelltools/Utils.java
index 245e4c3..3da7222 100644
--- a/statsd/tools/localtools/src/com/android/statsd/shelltools/Utils.java
+++ b/statsd/tools/localtools/src/com/android/statsd/shelltools/Utils.java
@@ -19,7 +19,10 @@
import com.android.os.StatsLog.ConfigMetricsReportList;
import com.android.os.StatsLog.EventMetricData;
import com.android.os.StatsLog.StatsLogReport;
+
import com.google.common.io.Files;
+import com.google.protobuf.ExtensionRegistry;
+
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
@@ -38,6 +41,8 @@
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import com.android.statsd.shelltools.CustomExtensionRegistry;
+
/**
* Utilities for local use of statsd.
*/
@@ -69,11 +74,14 @@
}
logger.severe(err.toString());
+ final String commandName = commands[0];
+
// Check result
if (process.waitFor() == 0) {
- logger.fine("Adb command successful.");
+ logger.fine("Command " + commandName + " is successful.");
} else {
- logger.severe("Abnormal adb shell termination for: " + String.join(",", commands));
+ logger.severe(
+ "Abnormal " + commandName + " termination for: " + String.join(",", commands));
throw new RuntimeException("Error running adb command: " + err.toString());
}
}
@@ -81,13 +89,11 @@
/**
* Dumps the report from the device and converts it to a ConfigMetricsReportList.
* Erases the data if clearData is true.
- * @param configId id of the config
- * @param clearData whether to erase the report data from statsd after getting the report.
+ *
+ * @param configId id of the config
+ * @param clearData whether to erase the report data from statsd after getting the report.
* @param useShellUid Pulls data for the {@link SHELL_UID} instead of the caller's uid.
- * @param logger Logger to log error messages
- * @return
- * @throws IOException
- * @throws InterruptedException
+ * @param logger Logger to log error messages
*/
public static ConfigMetricsReportList getReportList(long configId, boolean clearData,
boolean useShellUid, Logger logger, String deviceSerial)
@@ -109,26 +115,25 @@
"--include_current_bucket",
"--proto");
ConfigMetricsReportList reportList =
- ConfigMetricsReportList.parseFrom(new FileInputStream(outputFile));
+ ConfigMetricsReportList.parseFrom(new FileInputStream(outputFile),
+ CustomExtensionRegistry.REGISTRY);
return reportList;
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
logger.severe("Failed to fetch and parse the statsd output report. "
- + "Perhaps there is not a valid statsd config for the requested "
- + (useShellUid ? ("uid=" + SHELL_UID + ", ") : "")
- + "configId=" + configId
- + ".");
+ + "Perhaps there is not a valid statsd config for the requested "
+ + (useShellUid ? ("uid=" + SHELL_UID + ", ") : "")
+ + "configId=" + configId
+ + ".");
throw (e);
}
}
/**
* Logs an AppBreadcrumbReported atom.
- * @param label which label to log for the app breadcrumb atom.
- * @param state which state to log for the app breadcrumb atom.
- * @param logger Logger to log error messages
*
- * @throws IOException
- * @throws InterruptedException
+ * @param label which label to log for the app breadcrumb atom.
+ * @param state which state to log for the app breadcrumb atom.
+ * @param logger Logger to log error messages
*/
public static void logAppBreadcrumb(int label, int state, Logger logger, String deviceSerial)
throws IOException, InterruptedException {
@@ -143,6 +148,7 @@
String.valueOf(label),
String.valueOf(state));
}
+
public static void setUpLogger(Logger logger, boolean debug) {
ConsoleHandler handler = new ConsoleHandler();
handler.setFormatter(new LocalToolsFormatter());
@@ -209,6 +215,7 @@
/**
* Parse the result of "adb devices" to return the list of connected devices.
+ *
* @param logger Logger to log error messages
* @return List of the serial numbers of the connected devices.
*/
@@ -235,6 +242,7 @@
/**
* Returns ANDROID_SERIAL environment variable, or null if that is undefined or unavailable.
+ *
* @param logger Destination of error messages.
* @return String value of ANDROID_SERIAL environment variable, or null.
*/
@@ -250,10 +258,11 @@
/**
* Returns the device to use if one can be deduced, or null.
- * @param device Command-line specified device, or null.
+ *
+ * @param device Command-line specified device, or null.
* @param connectedDevices List of all connected devices.
- * @param defaultDevice Environment-variable specified device, or null.
- * @param logger Destination of error messages.
+ * @param defaultDevice Environment-variable specified device, or null.
+ * @param logger Destination of error messages.
* @return Device to use, or null.
*/
public static String chooseDevice(String device, List<String> connectedDevices,
@@ -307,9 +316,9 @@
StatsLog.AggregatedAtomInfo atomInfo = metricData.getAggregatedAtomInfo();
for (long timestamp : atomInfo.getElapsedTimestampNanosList()) {
data.add(EventMetricData.newBuilder()
- .setAtom(atomInfo.getAtom())
- .setElapsedTimestampNanos(timestamp)
- .build());
+ .setAtom(atomInfo.getAtom())
+ .setElapsedTimestampNanos(timestamp)
+ .build());
}
return data;
}
diff --git a/statsd/tools/localtools/src/com/android/statsd/shelltools/testdrive/TestDrive.java b/statsd/tools/localtools/src/com/android/statsd/shelltools/testdrive/TestDrive.java
index 7f8a93c..5be447c 100644
--- a/statsd/tools/localtools/src/com/android/statsd/shelltools/testdrive/TestDrive.java
+++ b/statsd/tools/localtools/src/com/android/statsd/shelltools/testdrive/TestDrive.java
@@ -24,18 +24,31 @@
import com.android.internal.os.StatsdConfigProto.SimpleAtomMatcher;
import com.android.internal.os.StatsdConfigProto.StatsdConfig;
import com.android.internal.os.StatsdConfigProto.TimeUnit;
+import com.android.os.AtomsProto;
import com.android.os.AtomsProto.Atom;
import com.android.os.StatsLog;
import com.android.os.StatsLog.ConfigMetricsReport;
import com.android.os.StatsLog.ConfigMetricsReportList;
import com.android.os.StatsLog.StatsLogReport;
+import com.android.os.telephony.qns.QnsExtensionAtoms;
import com.android.statsd.shelltools.Utils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.io.Files;
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+import com.google.protobuf.DescriptorProtos;
+import com.google.protobuf.Descriptors;
+import com.google.protobuf.DynamicMessage;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
import java.io.File;
+import java.io.FileInputStream;
import java.io.IOException;
+import java.io.InputStream;
+import java.nio.file.Path;
+import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -74,47 +87,85 @@
"com.android.imsserviceentitlement",
"com.google.android.cellbroadcastreceiver",
"com.google.android.apps.nexuslauncher",
+ "com.google.android.markup",
+ "com.android.art",
+ "com.google.android.art",
"AID_KEYSTORE",
"AID_VIRTUALIZATIONSERVICE",
+ "com.google.android.permissioncontroller",
+ "AID_NFC",
+ "AID_SECURE_ELEMENT",
+ "com.google.android.wearable.media.routing",
+ "com.google.android.healthconnect.controller",
+ "com.android.telephony.qns",
};
private static final String[] DEFAULT_PULL_SOURCES = {
- "AID_KEYSTORE",
- "AID_RADIO",
- "AID_SYSTEM",
+ "AID_KEYSTORE", "AID_RADIO", "AID_SYSTEM",
};
private static final Logger LOGGER = Logger.getLogger(TestDrive.class.getName());
+ private static final String HW_ATOMS_PROTO_FILEPATH =
+ "hardware/google/pixel/pixelstats/pixelatoms.proto";
@VisibleForTesting
String mDeviceSerial = null;
@VisibleForTesting
Dumper mDumper = new BasicDumper();
boolean mPressToContinue = false;
+ Integer mReportCollectionDelayMillis = 60_000;
+ List<String> mProtoIncludes = new ArrayList<>();
public static void main(String[] args) {
final Configuration configuration = new Configuration();
final TestDrive testDrive = new TestDrive();
Utils.setUpLogger(LOGGER, false);
- if (!testDrive.processArgs(configuration, args,
- Utils.getDeviceSerials(LOGGER), Utils.getDefaultDevice(LOGGER))) {
+ if (!testDrive.processArgs(
+ configuration, args, Utils.getDeviceSerials(LOGGER),
+ Utils.getDefaultDevice(LOGGER))) {
return;
}
- final ConfigMetricsReportList reports = testDrive.testDriveAndGetReports(
- configuration.createConfig(), configuration.hasPulledAtoms(),
- configuration.hasPushedAtoms());
+ final ConfigMetricsReportList reports =
+ testDrive.testDriveAndGetReports(
+ configuration.createConfig(),
+ configuration.hasPulledAtoms(),
+ configuration.hasPushedAtoms());
if (reports != null) {
configuration.dumpMetrics(reports, testDrive.mDumper);
}
}
- boolean processArgs(Configuration configuration, String[] args, List<String> connectedDevices,
+ static void printUsageMessage() {
+ LOGGER.severe("Usage: ./test_drive [options] <atomId1> <atomId2> ... <atomIdN>");
+ LOGGER.severe("OPTIONS");
+ LOGGER.severe("-h, --help");
+ LOGGER.severe("\tPrint this message");
+ LOGGER.severe("-one");
+ LOGGER.severe("\tCreating one event metric to catch all pushed atoms");
+ LOGGER.severe("-i");
+ LOGGER.severe("\tPath to proto file to include (pixelatoms.proto, etc.)");
+ LOGGER.severe("\tPath is absolute or relative to current dir or to ANDROID_BUILD_TOP");
+ LOGGER.severe("-terse");
+ LOGGER.severe("\tTerse output format.");
+ LOGGER.severe("-p additional_allowed_packages_csv");
+ LOGGER.severe("\tAllows collection atoms from an additional packages");
+ LOGGER.severe("-s DEVICE_SERIAL_NUMBER");
+ LOGGER.severe("\tDevice serial number to use for adb communication");
+ LOGGER.severe("-e");
+ LOGGER.severe("\tWait for Enter key press before collecting report");
+ LOGGER.severe("-d delay_ms");
+ LOGGER.severe("\tWait for delay_ms before collecting report, default is 60000 ms");
+ LOGGER.severe("-v");
+ LOGGER.severe("\tDebug logging level");
+ }
+
+ boolean processArgs(
+ Configuration configuration,
+ String[] args,
+ List<String> connectedDevices,
String defaultDevice) {
if (args.length < 1) {
- LOGGER.severe("Usage: ./test_drive [-one] "
- + "[-p additional_allowed_package] "
- + "[-s DEVICE_SERIAL_NUMBER] "
- + "<atomId1> <atomId2> ... <atomIdN>");
+ printUsageMessage();
return false;
}
@@ -130,13 +181,37 @@
LOGGER.info("Terse output format.");
mDumper = new TerseDumper();
} else if (remaining_args >= 3 && arg.equals("-p")) {
- configuration.mAdditionalAllowedPackage = args[++first_arg];
+ Collections.addAll(configuration.mAdditionalAllowedPackages,
+ args[++first_arg].split(","));
+ } else if (remaining_args >= 3 && arg.equals("-i")) {
+ mProtoIncludes.add(args[++first_arg]);
} else if (remaining_args >= 3 && arg.equals("-s")) {
mDeviceSerial = args[++first_arg];
} else if (remaining_args >= 2 && arg.equals("-e")) {
mPressToContinue = true;
+ } else if (remaining_args >= 2 && arg.equals("-v")) {
+ Utils.setUpLogger(LOGGER, true);
+ } else if (remaining_args >= 2 && arg.equals("-d")) {
+ mPressToContinue = false;
+ mReportCollectionDelayMillis = Integer.parseInt(args[++first_arg]);
+ } else if (arg.equals("-h") || arg.equals("--help")) {
+ printUsageMessage();
+ return false;
} else {
- break; // Found the atom list
+ break; // Found the atom list
+ }
+ }
+
+ if (mProtoIncludes.size() == 0) {
+ mProtoIncludes.add(HW_ATOMS_PROTO_FILEPATH);
+ }
+
+ for (; first_arg < args.length; ++first_arg) {
+ String atom = args[first_arg];
+ try {
+ configuration.addAtom(Integer.valueOf(atom), mProtoIncludes);
+ } catch (NumberFormatException e) {
+ LOGGER.severe("Bad atom id provided: " + atom);
}
}
@@ -145,20 +220,11 @@
return false;
}
- for ( ; first_arg < args.length; ++first_arg) {
- String atom = args[first_arg];
- try {
- configuration.addAtom(Integer.valueOf(atom));
- } catch (NumberFormatException e) {
- LOGGER.severe("Bad atom id provided: " + atom);
- }
- }
-
return configuration.hasPulledAtoms() || configuration.hasPushedAtoms();
}
- private ConfigMetricsReportList testDriveAndGetReports(StatsdConfig config,
- boolean hasPulledAtoms, boolean hasPushedAtoms) {
+ private ConfigMetricsReportList testDriveAndGetReports(
+ StatsdConfig config, boolean hasPulledAtoms, boolean hasPushedAtoms) {
if (config == null) {
LOGGER.severe("Failed to create valid config.");
return null;
@@ -167,21 +233,22 @@
String remoteConfigPath = null;
try {
remoteConfigPath = pushConfig(config, mDeviceSerial);
- LOGGER.info("Pushed the following config to statsd on device '" + mDeviceSerial
- + "':");
+ LOGGER.info("Pushed the following config to statsd on device '" + mDeviceSerial + "':");
LOGGER.info(config.toString());
if (hasPushedAtoms) {
LOGGER.info("Now please play with the device to trigger the event.");
}
if (!hasPulledAtoms) {
if (mPressToContinue) {
- LOGGER.info("Press enter after you finish playing with the device...");
+ LOGGER.info("Press Enter after you finish playing with the device...");
Scanner scanner = new Scanner(System.in);
scanner.nextLine();
} else {
LOGGER.info(
- "All events should be dumped after 1 min ...");
- Thread.sleep(60_000);
+ String.format(
+ "All events should be dumped after %d ms ...",
+ mReportCollectionDelayMillis));
+ Thread.sleep(mReportCollectionDelayMillis);
}
} else {
LOGGER.info("All events should be dumped after 1.5 minutes ...");
@@ -189,16 +256,15 @@
Utils.logAppBreadcrumb(0, 0, LOGGER, mDeviceSerial);
Thread.sleep(75_000);
}
- return Utils.getReportList(CONFIG_ID, true, false, LOGGER,
- mDeviceSerial);
+ return Utils.getReportList(CONFIG_ID, true, false, LOGGER, mDeviceSerial);
} catch (Exception e) {
LOGGER.log(Level.SEVERE, "Failed to test drive: " + e.getMessage(), e);
} finally {
removeConfig(mDeviceSerial);
if (remoteConfigPath != null) {
try {
- Utils.runCommand(null, LOGGER,
- "adb", "-s", mDeviceSerial, "shell", "rm",
+ Utils.runCommand(
+ null, LOGGER, "adb", "-s", mDeviceSerial, "shell", "rm",
remoteConfigPath);
} catch (Exception e) {
LOGGER.log(Level.WARNING,
@@ -216,15 +282,18 @@
@VisibleForTesting
Set<Integer> mPulledAtoms = new TreeSet<>();
@VisibleForTesting
- String mAdditionalAllowedPackage = null;
+ ArrayList<String> mAdditionalAllowedPackages = new ArrayList<>();
private final Set<Long> mTrackedMetrics = new HashSet<>();
+ private final String mAndroidBuildTop = System.getenv("ANDROID_BUILD_TOP");
+
+ private Descriptors.Descriptor externalDescriptor = null;
private void dumpMetrics(ConfigMetricsReportList reportList, Dumper dumper) {
// We may get multiple reports. Take the last one.
ConfigMetricsReport report = reportList.getReports(reportList.getReportsCount() - 1);
for (StatsLogReport statsLog : report.getMetricsList()) {
if (isTrackedMetric(statsLog.getMetricId())) {
- dumper.dump(statsLog);
+ dumper.dump(statsLog, externalDescriptor);
}
}
}
@@ -238,10 +307,23 @@
|| atomId >= VENDOR_PULLED_ATOM_START_TAG;
}
- void addAtom(Integer atom) {
- if (Atom.getDescriptor().findFieldByNumber(atom) == null) {
- LOGGER.severe("No such atom found: " + atom);
- return;
+ void addAtom(Integer atom, List<String> protoIncludes) {
+ if (Atom.getDescriptor().findFieldByNumber(atom) == null &&
+ Atom.getDescriptor().isExtensionNumber(atom) == false) {
+ // try to look in alternative locations
+ if (protoIncludes != null) {
+ boolean isAtomDefined = false;
+ for (int i = 0; i < protoIncludes.size(); i++) {
+ isAtomDefined = isAtomDefinedInFile(protoIncludes.get(i), atom);
+ if (isAtomDefined) {
+ break;
+ }
+ }
+ if (!isAtomDefined) {
+ LOGGER.severe("No such atom found: " + atom);
+ return;
+ }
+ }
}
if (isPulledAtom(atom)) {
mPulledAtoms.add(atom);
@@ -250,6 +332,151 @@
}
}
+ private String compileProtoFileIntoDescriptorSet(String protoFileName) {
+ final String protoCompilerBinary = "aprotoc";
+ final String descSetFlag = "--descriptor_set_out";
+ final String includeImportsFlag = "--include_imports";
+ final String includeSourceInfoFlag = "--include_source_info";
+ final String dsFileName = generateDescriptorSetFileName(protoFileName);
+
+ if (dsFileName == null) return null;
+
+ LOGGER.log(Level.FINE, "Target DescriptorSet File " + dsFileName);
+
+ try {
+ List<String> cmdArgs = new ArrayList<>();
+ cmdArgs.add(protoCompilerBinary);
+ cmdArgs.add(descSetFlag);
+ cmdArgs.add(dsFileName);
+ cmdArgs.add(includeImportsFlag);
+ cmdArgs.add(includeSourceInfoFlag);
+
+ // populate the proto_path argument
+ if (mAndroidBuildTop != null) {
+ cmdArgs.add("-I");
+ cmdArgs.add(mAndroidBuildTop);
+
+ Path protoBufSrcPath = Paths.get(mAndroidBuildTop, "external/protobuf/src");
+ cmdArgs.add("-I");
+ cmdArgs.add(protoBufSrcPath.toString());
+ }
+
+ Path protoPath = Paths.get(protoFileName);
+ while (protoPath.getParent() != null) {
+ LOGGER.log(Level.FINE, "Including " + protoPath.getParent().toString());
+ cmdArgs.add("-I");
+ cmdArgs.add(protoPath.getParent().toString());
+ protoPath = protoPath.getParent();
+ }
+ cmdArgs.add(protoFileName);
+
+ String[] commands = new String[cmdArgs.size()];
+ commands = cmdArgs.toArray(commands);
+ Utils.runCommand(null, LOGGER, commands);
+ return dsFileName;
+ } catch (InterruptedException | IOException e) {
+ LOGGER.severe("Error while performing proto compilation: " + e.getMessage());
+ }
+ return null;
+ }
+
+ private String validateIncludeProtoPath(String protoFileName) {
+ try {
+ File protoFile = new File(protoFileName);
+ if (!protoFile.exists()) {
+ protoFileName = Paths.get(mAndroidBuildTop).resolve(
+ protoFileName).toRealPath().toString();
+ }
+
+ // file will be generated in the current work dir
+ return Paths.get(protoFileName).toRealPath().toString();
+ } catch (IOException e) {
+ LOGGER.log(Level.INFO, "Could not find file " + protoFileName);
+ }
+ return null;
+ }
+
+ private String generateDescriptorSetFileName(String protoFileName) {
+ try {
+ // file will be generated in the current work dir
+ final Path protoPath = Paths.get(protoFileName).toRealPath();
+ LOGGER.log(Level.FINE, "Absolute proto file " + protoPath.toString());
+ Path dsPath = Paths.get(System.getProperty("user.dir"));
+ dsPath = dsPath.resolve(protoPath.getFileName().toString() + ".ds.tmp");
+ return dsPath.toString();
+ } catch (IOException e) {
+ LOGGER.severe("Could not find file " + protoFileName);
+ }
+ return null;
+ }
+
+ private boolean isAtomDefinedInFile(String fileName, Integer atom) {
+ final String fullProtoFilePath = validateIncludeProtoPath(fileName);
+ if (fullProtoFilePath == null) return false;
+
+ final String dsFileName = compileProtoFileIntoDescriptorSet(fullProtoFilePath);
+ if (dsFileName == null) return false;
+
+ try (InputStream input = new FileInputStream(dsFileName)) {
+ DescriptorProtos.FileDescriptorSet fileDescriptorSet =
+ DescriptorProtos.FileDescriptorSet.parseFrom(input);
+ Descriptors.FileDescriptor fieldOptionsDesc =
+ DescriptorProtos.FieldOptions.getDescriptor().getFile();
+
+ LOGGER.fine("Files count is " + fileDescriptorSet.getFileCount());
+
+ // preparing dependencies list
+ List<Descriptors.FileDescriptor> dependencies =
+ new ArrayList<Descriptors.FileDescriptor>();
+ for (int fileIndex = 0; fileIndex < fileDescriptorSet.getFileCount(); fileIndex++) {
+ LOGGER.fine("Processing file " + fileIndex);
+ try {
+ Descriptors.FileDescriptor dep = Descriptors.FileDescriptor.buildFrom(
+ fileDescriptorSet.getFile(fileIndex),
+ new Descriptors.FileDescriptor[0]);
+ dependencies.add(dep);
+ } catch (Descriptors.DescriptorValidationException e) {
+ LOGGER.fine("Unable to parse atoms proto file: " + fileName + ". Error: "
+ + e.getDescription());
+ }
+ }
+
+ Descriptors.FileDescriptor[] fileDescriptorDeps =
+ new Descriptors.FileDescriptor[dependencies.size()];
+ fileDescriptorDeps = dependencies.toArray(fileDescriptorDeps);
+
+ // looking for a file with an Atom definition
+ for (int fileIndex = 0; fileIndex < fileDescriptorSet.getFileCount(); fileIndex++) {
+ LOGGER.fine("Processing file " + fileIndex);
+ Descriptors.Descriptor atomMsgDesc = null;
+ try {
+ atomMsgDesc = Descriptors.FileDescriptor.buildFrom(
+ fileDescriptorSet.getFile(fileIndex), fileDescriptorDeps,
+ true)
+ .findMessageTypeByName("Atom");
+ } catch (Descriptors.DescriptorValidationException e) {
+ LOGGER.severe("Unable to parse atoms proto file: " + fileName + ". Error: "
+ + e.getDescription());
+ }
+
+ if (atomMsgDesc != null) {
+ LOGGER.fine("Atom message is located");
+ }
+
+ if (atomMsgDesc != null && atomMsgDesc.findFieldByNumber(atom) != null) {
+ externalDescriptor = atomMsgDesc;
+ return true;
+ }
+ }
+ } catch (IOException e) {
+ LOGGER.log(Level.WARNING, "Unable to parse atoms proto file: " + fileName, e);
+ } finally {
+ File dsFile = new File(dsFileName);
+ dsFile.delete();
+ }
+ return false;
+ }
+
private boolean hasPulledAtoms() {
return !mPulledAtoms.isEmpty();
}
@@ -296,7 +523,7 @@
}
if (mOnePushedAtomEvent) {
- // Create a union event metric, using an matcher that matches all pulled atoms.
+ // Create a union event metric, using a matcher that matches all pushed atoms.
AtomMatcher unionAtomMatcher = createUnionMatcher(simpleAtomMatchers,
atomMatcherId);
builder.addAtomMatcher(unionAtomMatcher);
@@ -305,12 +532,10 @@
builder.addEventMetric(eventMetricBuilder.build());
mTrackedMetrics.add(metricId++);
} else {
- // Create multiple event metrics, one per pulled atom.
+ // Create multiple event metrics, one per pushed atom.
for (AtomMatcher atomMatcher : simpleAtomMatchers) {
EventMetric.Builder eventMetricBuilder = EventMetric.newBuilder();
- eventMetricBuilder
- .setId(metricId)
- .setWhat(atomMatcher.getId());
+ eventMetricBuilder.setId(metricId).setWhat(atomMatcher.getId());
builder.addEventMetric(eventMetricBuilder.build());
mTrackedMetrics.add(metricId++);
}
@@ -327,8 +552,8 @@
return atomMatcherBuilder.build();
}
- private AtomMatcher createUnionMatcher(List<AtomMatcher> simpleAtomMatchers,
- long atomMatcherId) {
+ private AtomMatcher createUnionMatcher(
+ List<AtomMatcher> simpleAtomMatchers, long atomMatcherId) {
AtomMatcher.Combination.Builder combinationBuilder =
AtomMatcher.Combination.newBuilder();
combinationBuilder.setOperation(StatsdConfigProto.LogicalOperation.OR);
@@ -343,68 +568,91 @@
private StatsdConfig.Builder baseBuilder() {
ArrayList<String> allowedSources = new ArrayList<>();
Collections.addAll(allowedSources, ALLOWED_LOG_SOURCES);
- if (mAdditionalAllowedPackage != null) {
- allowedSources.add(mAdditionalAllowedPackage);
- }
+ allowedSources.addAll(mAdditionalAllowedPackages);
return StatsdConfig.newBuilder()
.addAllAllowedLogSource(allowedSources)
.addAllDefaultPullPackages(Arrays.asList(DEFAULT_PULL_SOURCES))
- .addPullAtomPackages(PullAtomPackages.newBuilder()
- .setAtomId(Atom.MEDIA_DRM_ACTIVITY_INFO_FIELD_NUMBER)
- .addPackages("AID_MEDIA"))
- .addPullAtomPackages(PullAtomPackages.newBuilder()
- .setAtomId(Atom.GPU_STATS_GLOBAL_INFO_FIELD_NUMBER)
- .addPackages("AID_GPU_SERVICE"))
- .addPullAtomPackages(PullAtomPackages.newBuilder()
- .setAtomId(Atom.GPU_STATS_APP_INFO_FIELD_NUMBER)
- .addPackages("AID_GPU_SERVICE"))
- .addPullAtomPackages(PullAtomPackages.newBuilder()
- .setAtomId(Atom.TRAIN_INFO_FIELD_NUMBER)
- .addPackages("AID_STATSD"))
- .addPullAtomPackages(PullAtomPackages.newBuilder()
- .setAtomId(Atom.GENERAL_EXTERNAL_STORAGE_ACCESS_STATS_FIELD_NUMBER)
- .addPackages("com.google.android.providers.media.module"))
- .addPullAtomPackages(PullAtomPackages.newBuilder()
- .setAtomId(Atom.LAUNCHER_LAYOUT_SNAPSHOT_FIELD_NUMBER)
- .addPackages("com.google.android.apps.nexuslauncher"))
+ .addPullAtomPackages(
+ PullAtomPackages.newBuilder()
+ .setAtomId(Atom.MEDIA_DRM_ACTIVITY_INFO_FIELD_NUMBER)
+ .addPackages("AID_MEDIA"))
+ .addPullAtomPackages(
+ PullAtomPackages.newBuilder()
+ .setAtomId(Atom.GPU_STATS_GLOBAL_INFO_FIELD_NUMBER)
+ .addPackages("AID_GPU_SERVICE"))
+ .addPullAtomPackages(
+ PullAtomPackages.newBuilder()
+ .setAtomId(Atom.GPU_STATS_APP_INFO_FIELD_NUMBER)
+ .addPackages("AID_GPU_SERVICE"))
+ .addPullAtomPackages(
+ PullAtomPackages.newBuilder()
+ .setAtomId(Atom.TRAIN_INFO_FIELD_NUMBER)
+ .addPackages("AID_STATSD"))
+ .addPullAtomPackages(
+ PullAtomPackages.newBuilder()
+ .setAtomId(
+ Atom.GENERAL_EXTERNAL_STORAGE_ACCESS_STATS_FIELD_NUMBER)
+ .addPackages("com.google.android.providers.media.module"))
+ .addPullAtomPackages(
+ PullAtomPackages.newBuilder()
+ .setAtomId(Atom.LAUNCHER_LAYOUT_SNAPSHOT_FIELD_NUMBER)
+ .addPackages("com.google.android.apps.nexuslauncher"))
+ .addPullAtomPackages(
+ PullAtomPackages.newBuilder()
+ .setAtomId(QnsExtensionAtoms
+ .QNS_RAT_PREFERENCE_MISMATCH_INFO_FIELD_NUMBER)
+ .addPackages("com.android.telephony.qns"))
+ .addPullAtomPackages(
+ PullAtomPackages.newBuilder()
+ .setAtomId(QnsExtensionAtoms
+ .QNS_HANDOVER_TIME_MILLIS_FIELD_NUMBER)
+ .addPackages("com.android.telephony.qns"))
+ .addPullAtomPackages(
+ PullAtomPackages.newBuilder()
+ .setAtomId(QnsExtensionAtoms
+ .QNS_HANDOVER_PINGPONG_FIELD_NUMBER)
+ .addPackages("com.android.telephony.qns"))
.setHashStringsInMetricReport(false);
}
}
interface Dumper {
- void dump(StatsLogReport report);
+ void dump(StatsLogReport report, Descriptors.Descriptor externalDescriptor);
}
static class BasicDumper implements Dumper {
@Override
- public void dump(StatsLogReport report) {
+ public void dump(StatsLogReport report, Descriptors.Descriptor externalDescriptor) {
System.out.println(report.toString());
}
}
static class TerseDumper extends BasicDumper {
@Override
- public void dump(StatsLogReport report) {
+ public void dump(StatsLogReport report, Descriptors.Descriptor externalDescriptor) {
if (report.hasGaugeMetrics()) {
dumpGaugeMetrics(report);
}
if (report.hasEventMetrics()) {
- dumpEventMetrics(report);
+ dumpEventMetrics(report, externalDescriptor);
}
}
- void dumpEventMetrics(StatsLogReport report) {
+
+ void dumpEventMetrics(StatsLogReport report,
+ Descriptors.Descriptor externalDescriptor) {
final List<StatsLog.EventMetricData> data = Utils.getEventMetricData(report);
if (data.isEmpty()) {
return;
}
long firstTimestampNanos = data.get(0).getElapsedTimestampNanos();
for (StatsLog.EventMetricData event : data) {
- final double deltaSec = (event.getElapsedTimestampNanos() - firstTimestampNanos)
- / 1e9;
- System.out.println(
- String.format("+%.3fs: %s", deltaSec, event.getAtom().toString()));
+ final double deltaSec =
+ (event.getElapsedTimestampNanos() - firstTimestampNanos) / 1e9;
+ System.out.println(String.format("+%.3fs: %s", deltaSec,
+ dumpAtom(event.getAtom(), externalDescriptor)));
}
}
+
void dumpGaugeMetrics(StatsLogReport report) {
final List<StatsLog.GaugeMetricData> data = report.getGaugeMetrics().getDataList();
if (data.isEmpty()) {
@@ -416,24 +664,68 @@
}
}
+ private static String dumpAtom(AtomsProto.Atom atom,
+ Descriptors.Descriptor externalDescriptor) {
+ if (atom.getPushedCase().getNumber() != 0 || atom.getPulledCase().getNumber() != 0) {
+ return atom.toString();
+ } else {
+ try {
+ return convertToExternalAtom(atom, externalDescriptor).toString();
+ } catch (Exception e) {
+ LOGGER.severe("Failed to parse an atom: " + e.getMessage());
+ return "";
+ }
+ }
+ }
+
+ private static DynamicMessage convertToExternalAtom(AtomsProto.Atom atom,
+ Descriptors.Descriptor externalDescriptor) throws Exception {
+ ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
+ CodedOutputStream cos = CodedOutputStream.newInstance(outputStream);
+ atom.writeTo(cos);
+ cos.flush();
+ ByteArrayInputStream inputStream = new ByteArrayInputStream(
+ outputStream.toByteArray());
+ CodedInputStream cis = CodedInputStream.newInstance(inputStream);
+ return DynamicMessage.parseFrom(externalDescriptor, cis);
+ }
+
+
private static String pushConfig(StatsdConfig config, String deviceSerial)
throws IOException, InterruptedException {
File configFile = File.createTempFile("statsdconfig", ".config");
configFile.deleteOnExit();
Files.write(config.toByteArray(), configFile);
String remotePath = "/data/local/tmp/" + configFile.getName();
- Utils.runCommand(null, LOGGER, "adb", "-s", deviceSerial,
- "push", configFile.getAbsolutePath(), remotePath);
- Utils.runCommand(null, LOGGER, "adb", "-s", deviceSerial,
- "shell", "cat", remotePath, "|", Utils.CMD_UPDATE_CONFIG,
+ Utils.runCommand(
+ null, LOGGER, "adb", "-s", deviceSerial, "push", configFile.getAbsolutePath(),
+ remotePath);
+ Utils.runCommand(
+ null,
+ LOGGER,
+ "adb",
+ "-s",
+ deviceSerial,
+ "shell",
+ "cat",
+ remotePath,
+ "|",
+ Utils.CMD_UPDATE_CONFIG,
String.valueOf(CONFIG_ID));
return remotePath;
}
private static void removeConfig(String deviceSerial) {
try {
- Utils.runCommand(null, LOGGER, "adb", "-s", deviceSerial,
- "shell", Utils.CMD_REMOVE_CONFIG, String.valueOf(CONFIG_ID));
+ Utils.runCommand(
+ null,
+ LOGGER,
+ "adb",
+ "-s",
+ deviceSerial,
+ "shell",
+ Utils.CMD_REMOVE_CONFIG,
+ String.valueOf(CONFIG_ID));
} catch (Exception e) {
LOGGER.severe("Failed to remove config: " + e.getMessage());
}
diff --git a/statsd/tools/localtools/test/com/android/statsd/shelltools/testdrive/ConfigurationTest.java b/statsd/tools/localtools/test/com/android/statsd/shelltools/testdrive/ConfigurationTest.java
index b1cc60f..650128d 100644
--- a/statsd/tools/localtools/test/com/android/statsd/shelltools/testdrive/ConfigurationTest.java
+++ b/statsd/tools/localtools/test/com/android/statsd/shelltools/testdrive/ConfigurationTest.java
@@ -26,6 +26,7 @@
import com.android.internal.os.StatsdConfigProto.StatsdConfig;
import com.android.os.AtomsProto;
+import org.junit.Ignore;
import org.junit.Test;
import java.util.ArrayList;
@@ -56,11 +57,13 @@
private final TestDrive.Configuration mConfiguration = new TestDrive.Configuration();
+ private final String mProtoFilePath = "test/data/atoms.proto";
+
@Test
public void testOnePushed() {
final int atom = 90;
assertFalse(TestDrive.Configuration.isPulledAtom(atom));
- mConfiguration.addAtom(atom);
+ mConfiguration.addAtom(atom, null);
StatsdConfig config = mConfiguration.createConfig();
//event_metric {
@@ -87,11 +90,119 @@
assertEquals(0, atomMatchers.size());
}
+ @Ignore("b/258831376")
+ @Test
+ public void testOnePushedFromExternalAtomsProto() {
+ final int atom = 105999;
+ assertFalse(TestDrive.Configuration.isPulledAtom(atom));
+
+ List<String> mProtoIncludes = new ArrayList<>();
+ mProtoIncludes.add(mProtoFilePath);
+
+ mConfiguration.addAtom(atom, mProtoIncludes);
+ StatsdConfig config = mConfiguration.createConfig();
+
+ //event_metric {
+ // id: 1111
+ // what: 1234567
+ //}
+ //atom_matcher {
+ // id: 1234567
+ // simple_atom_matcher {
+ // atom_id: 105999
+ // }
+ //}
+
+ assertEquals(1, config.getEventMetricCount());
+ assertEquals(0, config.getGaugeMetricCount());
+
+ assertTrue(mConfiguration.isTrackedMetric(config.getEventMetric(0).getId()));
+
+ final List<StatsdConfigProto.AtomMatcher> atomMatchers =
+ new ArrayList<>(config.getAtomMatcherList());
+ assertEquals(atom,
+ findAndRemoveAtomMatcherById(atomMatchers, config.getEventMetric(0).getWhat())
+ .getSimpleAtomMatcher().getAtomId());
+ assertEquals(0, atomMatchers.size());
+ }
+
+ @Ignore("b/258831376")
+ @Test
+ public void testOnePushedFromExternalAtomsProtoWithImportedAtom() {
+ final int atom = 106001;
+ assertFalse(TestDrive.Configuration.isPulledAtom(atom));
+
+ List<String> mProtoIncludes = new ArrayList<>();
+ mProtoIncludes.add(mProtoFilePath);
+
+ mConfiguration.addAtom(atom, mProtoIncludes);
+ StatsdConfig config = mConfiguration.createConfig();
+
+ //event_metric {
+ // id: 1111
+ // what: 1234567
+ //}
+ //atom_matcher {
+ // id: 1234567
+ // simple_atom_matcher {
+ // atom_id: 106001
+ // }
+ //}
+
+ assertEquals(1, config.getEventMetricCount());
+ assertEquals(0, config.getGaugeMetricCount());
+
+ assertTrue(mConfiguration.isTrackedMetric(config.getEventMetric(0).getId()));
+
+ final List<StatsdConfigProto.AtomMatcher> atomMatchers =
+ new ArrayList<>(config.getAtomMatcherList());
+ assertEquals(atom,
+ findAndRemoveAtomMatcherById(atomMatchers, config.getEventMetric(0).getWhat())
+ .getSimpleAtomMatcher().getAtomId());
+ assertEquals(0, atomMatchers.size());
+ }
+
+ @Ignore("b/258831376")
+ @Test
+ public void testOnePushedFromExternalAtomsProtoWithImportedEnum() {
+ final int atom = 100001;
+ assertFalse(TestDrive.Configuration.isPulledAtom(atom));
+
+ List<String> mProtoIncludes = new ArrayList<>();
+ mProtoIncludes.add(mProtoFilePath);
+
+ mConfiguration.addAtom(atom, mProtoIncludes);
+ StatsdConfig config = mConfiguration.createConfig();
+
+ //event_metric {
+ // id: 1111
+ // what: 1234567
+ //}
+ //atom_matcher {
+ // id: 1234567
+ // simple_atom_matcher {
+ // atom_id: 100001
+ // }
+ //}
+
+ assertEquals(1, config.getEventMetricCount());
+ assertEquals(0, config.getGaugeMetricCount());
+
+ assertTrue(mConfiguration.isTrackedMetric(config.getEventMetric(0).getId()));
+
+ final List<StatsdConfigProto.AtomMatcher> atomMatchers =
+ new ArrayList<>(config.getAtomMatcherList());
+ assertEquals(atom,
+ findAndRemoveAtomMatcherById(atomMatchers, config.getEventMetric(0).getWhat())
+ .getSimpleAtomMatcher().getAtomId());
+ assertEquals(0, atomMatchers.size());
+ }
+
@Test
public void testOnePulled() {
final int atom = 10022;
assertTrue(TestDrive.Configuration.isPulledAtom(atom));
- mConfiguration.addAtom(atom);
+ mConfiguration.addAtom(atom, null);
StatsdConfig config = mConfiguration.createConfig();
//gauge_metric {
@@ -141,12 +252,12 @@
public void testOnePulledTwoPushed() {
final int pulledAtom = 10022;
assertTrue(TestDrive.Configuration.isPulledAtom(pulledAtom));
- mConfiguration.addAtom(pulledAtom);
+ mConfiguration.addAtom(pulledAtom, null);
Integer[] pushedAtoms = new Integer[]{244, 245};
for (int atom : pushedAtoms) {
assertFalse(TestDrive.Configuration.isPulledAtom(atom));
- mConfiguration.addAtom(atom);
+ mConfiguration.addAtom(atom, null);
}
StatsdConfig config = mConfiguration.createConfig();
@@ -230,12 +341,12 @@
final int pulledAtom = 10022;
assertTrue(TestDrive.Configuration.isPulledAtom(pulledAtom));
- mConfiguration.addAtom(pulledAtom);
+ mConfiguration.addAtom(pulledAtom, null);
Integer[] pushedAtoms = new Integer[]{244, 245};
for (int atom : pushedAtoms) {
assertFalse(TestDrive.Configuration.isPulledAtom(atom));
- mConfiguration.addAtom(atom);
+ mConfiguration.addAtom(atom, null);
}
StatsdConfig config = mConfiguration.createConfig();
diff --git a/statsd/tools/localtools/test/data/atoms.proto b/statsd/tools/localtools/test/data/atoms.proto
new file mode 100644
index 0000000..78d04a5
--- /dev/null
+++ b/statsd/tools/localtools/test/data/atoms.proto
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto2";
+
+package android.tests;
+
+option java_package = "android.tests";
+option java_outer_classname = "ExternalAtoms";
+
+import "test/data/enum.proto";
+import "test/data/atoms/external_atoms/external_test_atom.proto";
+
+/* Allocated atom IDs. */
+message Atom {
+ oneof pushed {
+ AtomWithEnum atom_with_enum = 100001;
+ AOSPAtom aosp_atom = 105999;
+ external_atoms.TestAtom external_atoms_test_atom = 106001;
+ }
+}
+
+/* A test atom from the AOSP atom ID range that uses an imported enum value. */
+message AtomWithEnum {
+ optional string reverse_domain_name = 1;
+ optional android.tests.enum.DemoEnumValues enum_value = 2;
+}
+
+/* A test atom from the AOSP atom ID range. */
+message AOSPAtom {
+ optional string reverse_domain_name = 1;
+ optional int32 value = 2;
+}
diff --git a/statsd/tools/localtools/test/data/atoms/external_atoms/external_test_atom.proto b/statsd/tools/localtools/test/data/atoms/external_atoms/external_test_atom.proto
new file mode 100644
index 0000000..ae44b86
--- /dev/null
+++ b/statsd/tools/localtools/test/data/atoms/external_atoms/external_test_atom.proto
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto2";
+
+package android.tests.external_atoms;
+
+option java_package = "android.tests.external_atoms";
+
+message TestAtom {
+ optional string reverse_domain_name = 1;
+ optional int32 int_value = 2;
+}
diff --git a/statsd/tools/localtools/test/data/enum.proto b/statsd/tools/localtools/test/data/enum.proto
new file mode 100644
index 0000000..35c36a2
--- /dev/null
+++ b/statsd/tools/localtools/test/data/enum.proto
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto2";
+
+package android.tests.enum;
+
+enum DemoEnumValues {
+ DEMO_VALUE1 = 1;
+ DEMO_VALUE2 = 2;
+ DEMO_VALUE3 = 3;
+ DEMO_VALUE4 = 4;
+}
diff --git a/tests/AndroidTest.xml b/tests/AndroidTest.xml
index be8a12a..e33c2a0 100644
--- a/tests/AndroidTest.xml
+++ b/tests/AndroidTest.xml
@@ -28,4 +28,12 @@
<object type="module_controller" class="com.android.tradefed.testtype.suite.module.MainlineTestModuleController">
<option name="mainline-module-package-name" value="com.google.android.os.statsd" />
</object>
+
+ <target_preparer class="com.android.tradefed.targetprep.DeviceSetup">
+ <!-- avoid restarting device after modifying settings -->
+ <option name="force-skip-system-props" value="true" />
+
+ <option name="set-global-setting" key="verifier_verify_adb_installs" value="0" />
+ <option name="restore-settings" value="true" />
+ </target_preparer>
</configuration>
diff --git a/tests/apps/statsdapp/AndroidManifest.xml b/tests/apps/statsdapp/AndroidManifest.xml
index 8ad55c9..1f4f664 100644
--- a/tests/apps/statsdapp/AndroidManifest.xml
+++ b/tests/apps/statsdapp/AndroidManifest.xml
@@ -112,7 +112,5 @@
<instrumentation android:name="androidx.test.runner.AndroidJUnitRunner"
android:targetPackage="com.android.server.cts.device.statsd"
android:label="CTS tests of android.os.statsd stats collection">
- <meta-data android:name="listener"
- android:value="com.android.cts.runner.CtsTestRunListener"/>
</instrumentation>
</manifest>
diff --git a/tests/apps/statsdapp/src/com/android/server/cts/device/statsd/RestrictedPermissionTests.java b/tests/apps/statsdapp/src/com/android/server/cts/device/statsd/RestrictedPermissionTests.java
new file mode 100644
index 0000000..7896e5b
--- /dev/null
+++ b/tests/apps/statsdapp/src/com/android/server/cts/device/statsd/RestrictedPermissionTests.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.server.cts.device.statsd;
+
+import static org.junit.Assert.fail;
+
+import android.content.pm.PackageInfo;
+import android.content.pm.PackageManager;
+import android.os.UserHandle;
+
+import androidx.test.InstrumentationRegistry;
+import androidx.test.filters.SmallTest;
+import androidx.test.runner.AndroidJUnit4;
+
+import com.android.compatibility.common.util.CddTest;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import java.util.List;
+
+/**
+ * Build, install, and run these tests with the following command:
+ * atest CtsStatsdHostTestCases
+ */
+@SmallTest
+@RunWith(AndroidJUnit4.class)
+public class RestrictedPermissionTests {
+
+ /**
+ * Verify that the {@link android.Manifest.permission#READ_RESTRICTED_STATS}
+ * permission is only held by at most one package.
+ */
+ @Test
+ @CddTest(requirements={"9.8.17/C-0-1"})
+ public void testReadRestrictedStatsPermission() throws Exception {
+ final PackageManager pm = InstrumentationRegistry.getContext().getPackageManager();
+ final List<PackageInfo> holding = pm.getPackagesHoldingPermissions(new String[]{
+ android.Manifest.permission.READ_RESTRICTED_STATS
+ }, PackageManager.MATCH_ALL);
+
+ int count = 0;
+ String pkgNames = "";
+ for (PackageInfo pkg : holding) {
+ int uid = pm.getApplicationInfo(pkg.packageName, 0).uid;
+ if (UserHandle.isApp(uid)) {
+ pkgNames += pkg.packageName + "\n";
+ count++;
+ }
+ }
+ if (count > 1) {
+ fail("Only one app may hold the READ_RESTRICTED_STATS permission; found packages: \n"
+ + pkgNames);
+ }
+ }
+}
diff --git a/tests/src/android/cts/statsd/atom/AtomTestCase.java b/tests/src/android/cts/statsd/atom/AtomTestCase.java
index fc37543..0e6c9c6 100644
--- a/tests/src/android/cts/statsd/atom/AtomTestCase.java
+++ b/tests/src/android/cts/statsd/atom/AtomTestCase.java
@@ -304,14 +304,16 @@
protected void uploadConfig(StatsdConfig config) throws Exception {
LogUtil.CLog.d("Uploading the following config:\n" + config.toString());
File configFile = File.createTempFile("statsdconfig", ".config");
- configFile.deleteOnExit();
- Files.write(config.toByteArray(), configFile);
- String remotePath = "/data/local/tmp/" + configFile.getName();
- getDevice().pushFile(configFile, remotePath);
- getDevice().executeShellCommand(
- String.join(" ", "cat", remotePath, "|", UPDATE_CONFIG_CMD,
- String.valueOf(SHELL_UID), String.valueOf(CONFIG_ID)));
- getDevice().executeShellCommand("rm " + remotePath);
+ try {
+ Files.write(config.toByteArray(), configFile);
+ String remotePath = "/data/local/tmp/" + configFile.getName();
+ getDevice().pushFile(configFile, remotePath);
+ getDevice().executeShellCommand(String.join(" ", "cat", remotePath, "|",
+ UPDATE_CONFIG_CMD, String.valueOf(SHELL_UID), String.valueOf(CONFIG_ID)));
+ getDevice().executeShellCommand("rm " + remotePath);
+ } finally {
+ configFile.delete();
+ }
}
protected void removeConfig(long configId) throws Exception {
diff --git a/tests/src/android/cts/statsd/metric/CountMetricsTests.java b/tests/src/android/cts/statsd/metric/CountMetricsTests.java
index 1f1dccb..22b4f9c 100644
--- a/tests/src/android/cts/statsd/metric/CountMetricsTests.java
+++ b/tests/src/android/cts/statsd/metric/CountMetricsTests.java
@@ -25,9 +25,9 @@
import com.android.internal.os.StatsdConfigProto.Position;
import com.android.os.AtomsProto.Atom;
import com.android.os.AtomsProto.AppBreadcrumbReported;
-import com.android.os.AtomsProto.AttributionNode;
import com.android.os.AtomsProto.BleScanStateChanged;
import com.android.os.AtomsProto.WakelockStateChanged;
+import com.android.os.AttributionNode;
import com.android.os.StatsLog;
import com.android.os.StatsLog.ConfigMetricsReport;
import com.android.os.StatsLog.ConfigMetricsReportList;
diff --git a/tests/src/android/cts/statsd/metric/ValueMetricsTests.java b/tests/src/android/cts/statsd/metric/ValueMetricsTests.java
index d0370e1..adfc565 100644
--- a/tests/src/android/cts/statsd/metric/ValueMetricsTests.java
+++ b/tests/src/android/cts/statsd/metric/ValueMetricsTests.java
@@ -319,6 +319,8 @@
LogUtil.CLog.d("Got the following value metric data: " + metricReport.toString());
assertThat(metricReport.getMetricId()).isEqualTo(MetricsUtils.VALUE_METRIC_ID);
assertThat(metricReport.getValueMetrics().getDataList()).isEmpty();
+ // Skipped buckets are not added when metric is empty.
+ assertThat(metricReport.getValueMetrics().getSkippedList()).isEmpty();
}
public void testValueMetricWithConditionAndActivation() throws Exception {
diff --git a/tests/src/android/cts/statsd/restricted/ReadRestrictedStatsPermissionTest.java b/tests/src/android/cts/statsd/restricted/ReadRestrictedStatsPermissionTest.java
new file mode 100644
index 0000000..5191f39
--- /dev/null
+++ b/tests/src/android/cts/statsd/restricted/ReadRestrictedStatsPermissionTest.java
@@ -0,0 +1,14 @@
+package android.cts.statsd.restricted;
+
+import android.cts.statsd.atom.DeviceAtomTestCase;
+
+/**
+ * Tests Suite for restricted stats permissions.
+ */
+public class ReadRestrictedStatsPermissionTest extends DeviceAtomTestCase {
+
+ public void testReadRestrictedStatsPermission() throws Exception {
+ runDeviceTests(DEVICE_SIDE_TEST_PACKAGE,
+ ".RestrictedPermissionTests", "testReadRestrictedStatsPermission");
+ }
+}
diff --git a/tests/src/android/cts/statsd/subscriber/ShellSubscriberTest.java b/tests/src/android/cts/statsd/subscriber/ShellSubscriberTest.java
index ba980fb..d3c142b 100644
--- a/tests/src/android/cts/statsd/subscriber/ShellSubscriberTest.java
+++ b/tests/src/android/cts/statsd/subscriber/ShellSubscriberTest.java
@@ -37,13 +37,37 @@
import java.nio.ByteOrder;
import java.util.Arrays;
import java.util.concurrent.TimeUnit;
+import android.cts.statsd.atom.AtomTestCase;
/**
* Statsd shell data subscription test.
*/
-public class ShellSubscriberTest extends DeviceTestCase {
+public class ShellSubscriberTest extends AtomTestCase {
private int sizetBytes;
+ public class ShellSubscriptionThread extends Thread {
+ String cmd;
+ CollectingByteOutputReceiver receiver;
+ int maxTimeoutForCommandSec;
+ public ShellSubscriptionThread(
+ String cmd,
+ CollectingByteOutputReceiver receiver,
+ int maxTimeoutForCommandSec) {
+ this.cmd = cmd;
+ this.receiver = receiver;
+ this.maxTimeoutForCommandSec = maxTimeoutForCommandSec;
+ }
+ public void run () {
+ try {
+ getDevice().executeShellCommand(cmd, receiver, maxTimeoutForCommandSec,
+ /*maxTimeToOutputShellResponse=*/maxTimeoutForCommandSec, TimeUnit.SECONDS,
+ /*retryAttempts=*/0);
+ } catch (Exception e) {
+ fail(e.getMessage());
+ }
+ }
+ }
+
@Override
protected void setUp() throws Exception {
super.setUp();
@@ -55,29 +79,128 @@
return;
}
- ShellConfig.ShellSubscription config = createConfig();
- CollectingByteOutputReceiver receiver = new CollectingByteOutputReceiver();
- startSubscription(config, receiver, /*maxTimeoutForCommandSec=*/5,
- /*subscriptionTimeSec=*/5);
+ CollectingByteOutputReceiver receiver = startSubscription();
checkOutput(receiver);
}
+ // Repeats testShellSubscription five times.
public void testShellSubscriptionReconnect() {
+ int numOfSubs = 5;
if (sizetBytes < 0) {
return;
}
- ShellConfig.ShellSubscription config = createConfig();
- for (int i = 0; i < 5; i++) {
- CollectingByteOutputReceiver receiver = new CollectingByteOutputReceiver();
- // A subscription time of -1 means that statsd will not impose a timeout on the
- // subscription. Thus, the client will exit before statsd ends the subscription.
- startSubscription(config, receiver, /*maxTimeoutForCommandSec=*/5,
- /*subscriptionTimeSec=*/-1);
+ for (int i = 0; i < numOfSubs; i++) {
+ CollectingByteOutputReceiver receiver = startSubscription();
checkOutput(receiver);
}
}
+ // Tests that multiple clients can run at once:
+ // - Runs the maximum number of active subscriptions (20) at once.
+ // - Lets all but one of those subscriptions return:
+ // -- One subscription is kept alive so the subscriber helper thread stays alive.
+ // - Runs the maximum number of subscriptions minus 1 to reach the maximum again.
+ // - Attempts to run one more subscription, which is rejected.
+ public void testShellMaxSubscriptions() {
+ // Maximum number of active subscriptions, set in ShellSubscriber.h
+ int maxSubs = 20;
+ if (sizetBytes < 0) {
+ return;
+ }
+ CollectingByteOutputReceiver[] receivers = new CollectingByteOutputReceiver[maxSubs + 1];
+ ShellSubscriptionThread[] shellThreads = new ShellSubscriptionThread[maxSubs + 1];
+ ShellConfig.ShellSubscription config = createConfig();
+ byte[] validConfig = makeValidConfig(config);
+
+ // timeout of 5 sec for all subscriptions except for the first
+ int timeout = 5;
+ // timeout of 25 sec to ensure that the first subscription stays active for two sessions
+ // of creating the maximum number of subscriptions
+ int firstSubTimeout = 25;
+ try {
+ // Push the shell config file to the device
+ String remotePath = pushShellConfigToDevice(validConfig);
+
+ String cmd = "cat " + remotePath + " | cmd stats data-subscribe " + timeout;
+ String firstSubCmd =
+ "cat " + remotePath + " | cmd stats data-subscribe " + firstSubTimeout;
+
+ for (int i = 0; i < maxSubs; i++) {
+ // Run data-subscribe on a thread
+ receivers[i] = new CollectingByteOutputReceiver();
+ if (i == 0) {
+ shellThreads[i] =
+ new ShellSubscriptionThread(firstSubCmd, receivers[i], firstSubTimeout);
+ } else {
+ shellThreads[i] =
+ new ShellSubscriptionThread(cmd, receivers[i], timeout);
+ }
+ shellThreads[i].start();
+ LogUtil.CLog.d("Starting new shell subscription.");
+ }
+ // Sleep 2 seconds to make sure all subscription clients are initialized before
+ // first pushed event
+ Thread.sleep(2000);
+
+ // Pushed event. arbitrary label = 1
+ doAppBreadcrumbReported(1);
+
+ // Make sure the last 19 threads die before moving to the next step.
+ // The first subscription is still active due to its longer timeout, which is used
+ // to keep the subscriber helper thread alive.
+ for (int i = 1; i < maxSubs; i++) {
+ shellThreads[i].join();
+ }
+
+ // Validate the outputs of the last 19 subscriptions since they are finished
+ for (int i = 1; i < maxSubs; i++) {
+ checkOutput(receivers[i]);
+ }
+
+ // Run 19 more subscriptions to hit the maximum active subscriptions again
+ for (int i = 1; i < maxSubs; i++) {
+ // Run data-subscribe on a thread
+ receivers[i] = new CollectingByteOutputReceiver();
+ shellThreads[i] =
+ new ShellSubscriptionThread(cmd, receivers[i], timeout);
+ shellThreads[i].start();
+ LogUtil.CLog.d("Starting new shell subscription.");
+ }
+ // Sleep 2 seconds to make sure all subscription clients are initialized before
+ // pushed event
+ Thread.sleep(2000);
+
+ // ShellSubscriber only allows 20 subscriptions at a time. This is the 21st which will
+ // be ignored
+ receivers[maxSubs] = new CollectingByteOutputReceiver();
+ shellThreads[maxSubs] =
+ new ShellSubscriptionThread(cmd, receivers[maxSubs], timeout);
+ shellThreads[maxSubs].start();
+
+ // Sleep 1 second to ensure that the 21st subscription is rejected
+ Thread.sleep(1000);
+
+ // Pushed event. arbitrary label = 1
+ doAppBreadcrumbReported(1);
+
+ // Make sure all the threads die before moving to the next step
+ for (int i = 0; i <= maxSubs; i++) {
+ shellThreads[i].join();
+ }
+ // Remove config from device if not already deleted
+ getDevice().executeShellCommand("rm " + remotePath);
+ } catch (Exception e) {
+ fail(e.getMessage());
+ }
+ for (int i = 0; i < maxSubs; i++) {
+ checkOutput(receivers[i]);
+ }
+ // Ensure that the 21st subscription got rejected and has an empty output
+ byte[] output = receivers[maxSubs].getOutput();
+ assertThat(output.length).isEqualTo(0);
+ }
+
private int getSizetBytes() {
try {
ITestDevice device = getDevice();
@@ -93,53 +216,67 @@
}
}
- // Choose a pulled atom that is likely to be supported on all devices (SYSTEM_UPTIME). Testing
- // pushed atoms is trickier because executeShellCommand() is blocking, so we cannot push a
- // breadcrumb event while the shell subscription is running.
private ShellConfig.ShellSubscription createConfig() {
return ShellConfig.ShellSubscription.newBuilder()
- .addPulled(ShellConfig.PulledAtomSubscription.newBuilder()
- .setMatcher(StatsdConfigProto.SimpleAtomMatcher.newBuilder()
- .setAtomId(Atom.SYSTEM_UPTIME_FIELD_NUMBER))
- .setFreqMillis(2000))
- .build();
+ .addPushed((StatsdConfigProto.SimpleAtomMatcher.newBuilder()
+ .setAtomId(Atom.APP_BREADCRUMB_REPORTED_FIELD_NUMBER))
+ .build()).build();
}
- /**
- * @param maxTimeoutForCommandSec maximum time imposed by adb that the command will run
- * @param subscriptionTimeSec maximum time imposed by statsd that the subscription will last
- */
- private void startSubscription(
- ShellConfig.ShellSubscription config,
- CollectingByteOutputReceiver receiver,
- int maxTimeoutForCommandSec,
- int subscriptionTimeSec) {
- LogUtil.CLog.d("Uploading the following config:\n" + config.toString());
+ private byte[] makeValidConfig(ShellConfig.ShellSubscription config) {
+ int length = config.toByteArray().length;
+ byte[] validConfig = new byte[sizetBytes + length];
+ System.arraycopy(IntToByteArrayLittleEndian(length), 0, validConfig, 0, sizetBytes);
+ System.arraycopy(config.toByteArray(), 0, validConfig, sizetBytes, length);
+ return validConfig;
+ }
+
+ private String pushShellConfigToDevice(byte[] validConfig) {
try {
File configFile = File.createTempFile("shellconfig", ".config");
configFile.deleteOnExit();
- int length = config.toByteArray().length;
- byte[] combined = new byte[sizetBytes + config.toByteArray().length];
-
- System.arraycopy(IntToByteArrayLittleEndian(length), 0, combined, 0, sizetBytes);
- System.arraycopy(config.toByteArray(), 0, combined, sizetBytes, length);
-
- Files.write(combined, configFile);
+ Files.write(validConfig, configFile);
String remotePath = "/data/local/tmp/" + configFile.getName();
getDevice().pushFile(configFile, remotePath);
- LogUtil.CLog.d("waiting....................");
+ return remotePath;
- String cmd = String.join(" ", "cat", remotePath, "|", "cmd stats data-subscribe",
- String.valueOf(subscriptionTimeSec));
+ } catch (Exception e) {
+ fail(e.getMessage());
+ }
+ return "";
+ }
+ private CollectingByteOutputReceiver startSubscription() {
+ ShellConfig.ShellSubscription config = createConfig();
+ CollectingByteOutputReceiver receiver = new CollectingByteOutputReceiver();
+ LogUtil.CLog.d("Uploading the following config:\n" + config.toString());
+ byte[] validConfig = makeValidConfig(config);
+ // timeout of 2 sec for both data-subscribe command and executeShellCommand in thread
+ int timeout = 2;
+ try {
+ // Push the shell config file to the device
+ String remotePath = pushShellConfigToDevice(validConfig);
- getDevice().executeShellCommand(cmd, receiver, maxTimeoutForCommandSec,
- /*maxTimeToOutputShellResponse=*/maxTimeoutForCommandSec, TimeUnit.SECONDS,
- /*retryAttempts=*/0);
+ String cmd = "cat " + remotePath + " | cmd stats data-subscribe " + timeout;
+ // Run data-subscribe on a thread
+ ShellSubscriptionThread shellThread =
+ new ShellSubscriptionThread(cmd, receiver, timeout);
+ shellThread.start();
+ LogUtil.CLog.d("Starting new shell subscription.");
+
+ // Sleep a second to make sure subscription is initiated
+ Thread.sleep(1000);
+
+ // Pushed event. arbitrary label = 1
+ doAppBreadcrumbReported(1);
+ // Wait for thread to die before returning
+ shellThread.join();
+ // Remove config from device if not already deleted
getDevice().executeShellCommand("rm " + remotePath);
} catch (Exception e) {
fail(e.getMessage());
}
+ return receiver;
}
private byte[] IntToByteArrayLittleEndian(int length) {
@@ -155,6 +292,7 @@
int startIndex = 0;
byte[] output = receiver.getOutput();
+ LogUtil.CLog.d("output length in checkOutput: " + output.length);
assertThat(output.length).isGreaterThan(0);
while (output.length > startIndex) {
assertThat(output.length).isAtLeast(startIndex + sizetBytes);
@@ -178,8 +316,10 @@
}
assertThat(data.getAtomCount()).isEqualTo(1);
- assertThat(data.getAtom(0).hasSystemUptime()).isTrue();
- assertThat(data.getAtom(0).getSystemUptime().getUptimeMillis()).isGreaterThan(0L);
+ assertThat(data.getAtom(0).hasAppBreadcrumbReported()).isTrue();
+ assertThat(data.getAtom(0).getAppBreadcrumbReported().getLabel()).isEqualTo(1);
+ assertThat(data.getAtom(0).getAppBreadcrumbReported().getState().getNumber())
+ .isEqualTo(1);
atomCount++;
startIndex += sizetBytes + dataLength;
}