Merge "Add depth/stencil formats to ColorBufferGl path" into main
diff --git a/guest/meson.build b/guest/meson.build
index 9c3041a..143fab9 100644
--- a/guest/meson.build
+++ b/guest/meson.build
@@ -35,7 +35,7 @@
thread_dep = dependency('threads')
if with_guest_test
- rutabaga_gfx_ffi_dep = dependency('rutabaga_gfx_ffi')
+ virtgpu_kumquat_dep = dependency('virtgpu_kumquat_ffi')
endif
#===============#
diff --git a/guest/platform/include/virtgpu_gfxstream_protocol.h b/guest/platform/include/virtgpu_gfxstream_protocol.h
index 285e8df..2edc2f3 100644
--- a/guest/platform/include/virtgpu_gfxstream_protocol.h
+++ b/guest/platform/include/virtgpu_gfxstream_protocol.h
@@ -41,6 +41,7 @@
#define GFXSTREAM_CREATE_EXPORT_SYNC_VK 0xa000
#define GFXSTREAM_CREATE_IMPORT_SYNC_VK 0xa001
#define GFXSTREAM_CREATE_QSRI_EXPORT_VK 0xa002
+#define GFXSTREAM_RESOURCE_CREATE_3D 0xa003
// clang-format off
// A placeholder command to ensure virtio-gpu completes
@@ -87,6 +88,22 @@
uint32_t padding;
};
+struct gfxstreamResourceCreate3d {
+ struct gfxstreamHeader hdr;
+ uint32_t target;
+ uint32_t format;
+ uint32_t bind;
+ uint32_t width;
+ uint32_t height;
+ uint32_t depth;
+ uint32_t arraySize;
+ uint32_t lastLevel;
+ uint32_t nrSamples;
+ uint32_t flags;
+ uint32_t pad;
+ uint64_t blobId;
+};
+
struct vulkanCapset {
uint32_t protocolVersion;
@@ -98,7 +115,8 @@
uint32_t deferredMapping;
uint32_t blobAlignment;
uint32_t noRenderControlEnc;
- uint32_t padding[14];
+ uint32_t alwaysBlob;
+ uint32_t padding[13];
};
struct magmaCapset {
diff --git a/guest/platform/kumquat/VirtGpuKumquat.h b/guest/platform/kumquat/VirtGpuKumquat.h
new file mode 100644
index 0000000..624dece
--- /dev/null
+++ b/guest/platform/kumquat/VirtGpuKumquat.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "VirtGpu.h"
+#include "virtgpu_kumquat/virtgpu_kumquat_ffi.h"
+
+class VirtGpuKumquatResource : public std::enable_shared_from_this<VirtGpuKumquatResource>,
+ public VirtGpuResource {
+ public:
+ VirtGpuKumquatResource(struct virtgpu_kumquat* virtGpu, uint32_t blobHandle,
+ uint32_t resourceHandle, uint64_t size);
+ ~VirtGpuKumquatResource();
+
+ uint32_t getResourceHandle() const override;
+ uint32_t getBlobHandle() const override;
+ int wait() override;
+
+ VirtGpuResourceMappingPtr createMapping(void) override;
+ int exportBlob(struct VirtGpuExternalHandle& handle) override;
+
+ int transferFromHost(uint32_t x, uint32_t y, uint32_t w, uint32_t h) override;
+ int transferToHost(uint32_t x, uint32_t y, uint32_t w, uint32_t h) override;
+
+ private:
+    // Not owned. Ideally this would be a ScopedFD, but it doesn't matter since we have a
+    // singleton device implementation anyway.
+ struct virtgpu_kumquat* mVirtGpu = nullptr;
+ ;
+
+ uint32_t mBlobHandle;
+ uint32_t mResourceHandle;
+ uint64_t mSize;
+};
+
+class VirtGpuKumquatResourceMapping : public VirtGpuResourceMapping {
+ public:
+ VirtGpuKumquatResourceMapping(VirtGpuResourcePtr blob, uint8_t* ptr, uint64_t size);
+ ~VirtGpuKumquatResourceMapping(void);
+
+ uint8_t* asRawPtr(void) override;
+
+ private:
+ VirtGpuResourcePtr mBlob;
+ uint8_t* mPtr;
+ uint64_t mSize;
+};
+
+class VirtGpuKumquatDevice : public VirtGpuDevice {
+ public:
+ VirtGpuKumquatDevice(enum VirtGpuCapset capset, int fd = -1);
+ virtual ~VirtGpuKumquatDevice();
+
+ virtual int64_t getDeviceHandle(void);
+
+ virtual struct VirtGpuCaps getCaps(void);
+
+ VirtGpuResourcePtr createBlob(const struct VirtGpuCreateBlob& blobCreate) override;
+ VirtGpuResourcePtr createResource(uint32_t width, uint32_t height, uint32_t virglFormat,
+ uint32_t target, uint32_t bind, uint32_t bpp) override;
+
+ virtual VirtGpuResourcePtr importBlob(const struct VirtGpuExternalHandle& handle);
+ virtual int execBuffer(struct VirtGpuExecBuffer& execbuffer, const VirtGpuResource* blob);
+
+ private:
+ struct virtgpu_kumquat* mVirtGpu = nullptr;
+ ;
+ struct VirtGpuCaps mCaps;
+};
diff --git a/guest/platform/kumquat/VirtGpuKumquatBlob.cpp b/guest/platform/kumquat/VirtGpuKumquatBlob.cpp
new file mode 100644
index 0000000..1a9c99e
--- /dev/null
+++ b/guest/platform/kumquat/VirtGpuKumquatBlob.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cutils/log.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include <cerrno>
+#include <cstring>
+
+#include "VirtGpuKumquat.h"
+#include "virtgpu_kumquat/virtgpu_kumquat_ffi.h"
+
+VirtGpuKumquatResource::VirtGpuKumquatResource(struct virtgpu_kumquat* virtGpu, uint32_t blobHandle,
+ uint32_t resourceHandle, uint64_t size)
+ : mVirtGpu(virtGpu), mBlobHandle(blobHandle), mResourceHandle(resourceHandle), mSize(size) {}
+
+VirtGpuKumquatResource::~VirtGpuKumquatResource() {
+ struct drm_kumquat_resource_unref unref {
+ .bo_handle = mBlobHandle, .pad = 0,
+ };
+
+ int ret = virtgpu_kumquat_resource_unref(mVirtGpu, &unref);
+ if (ret) {
+ ALOGE("Closed failed with : [%s, blobHandle %u, resourceHandle: %u]", strerror(errno),
+ mBlobHandle, mResourceHandle);
+ }
+}
+
+uint32_t VirtGpuKumquatResource::getBlobHandle() const { return mBlobHandle; }
+
+uint32_t VirtGpuKumquatResource::getResourceHandle() const { return mResourceHandle; }
+
+VirtGpuResourceMappingPtr VirtGpuKumquatResource::createMapping() {
+ int ret;
+ struct drm_kumquat_map map {
+ .bo_handle = mBlobHandle, .ptr = NULL, .size = mSize,
+ };
+
+ ret = virtgpu_kumquat_resource_map(mVirtGpu, &map);
+ if (ret < 0) {
+ ALOGE("Mapping failed with %s", strerror(errno));
+ return nullptr;
+ }
+
+ return std::make_shared<VirtGpuKumquatResourceMapping>(shared_from_this(), (uint8_t*)map.ptr,
+ mSize);
+}
+
+int VirtGpuKumquatResource::exportBlob(struct VirtGpuExternalHandle& handle) {
+ int ret;
+ struct drm_kumquat_resource_export exp = {0};
+
+ exp.bo_handle = mBlobHandle;
+
+ ret = virtgpu_kumquat_resource_export(mVirtGpu, &exp);
+ if (ret) {
+ ALOGE("Failed to export blob with %s", strerror(errno));
+ return ret;
+ }
+
+ handle.osHandle = static_cast<int64_t>(exp.os_handle);
+ handle.type = static_cast<VirtGpuHandleType>(exp.handle_type);
+ return 0;
+}
+
+int VirtGpuKumquatResource::wait() {
+ int ret;
+ struct drm_kumquat_wait wait = {
+ .handle = mBlobHandle,
+ .flags = 0,
+ };
+
+ ret = virtgpu_kumquat_wait(mVirtGpu, &wait);
+ if (ret < 0) {
+ ALOGE("Wait failed with %s", strerror(errno));
+ return ret;
+ }
+
+ return 0;
+}
+
+int VirtGpuKumquatResource::transferToHost(uint32_t x, uint32_t y, uint32_t w, uint32_t h) {
+ int ret;
+ struct drm_kumquat_transfer_to_host xfer = {0};
+
+ xfer.box.x = x;
+ xfer.box.y = y;
+ xfer.box.w = w;
+ xfer.box.h = h;
+ xfer.box.d = 1;
+ xfer.bo_handle = mBlobHandle;
+
+ ret = virtgpu_kumquat_transfer_to_host(mVirtGpu, &xfer);
+ if (ret < 0) {
+ ALOGE("Transfer to host failed with %s", strerror(errno));
+ return ret;
+ }
+
+ return 0;
+}
+
+int VirtGpuKumquatResource::transferFromHost(uint32_t x, uint32_t y, uint32_t w, uint32_t h) {
+ int ret;
+ struct drm_kumquat_transfer_from_host xfer = {0};
+
+ xfer.box.x = x;
+ xfer.box.y = y;
+ xfer.box.w = w;
+ xfer.box.h = h;
+ xfer.box.d = 1;
+ xfer.bo_handle = mBlobHandle;
+
+ ret = virtgpu_kumquat_transfer_from_host(mVirtGpu, &xfer);
+ if (ret < 0) {
+ ALOGE("Transfer from host failed with %s", strerror(errno));
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/guest/platform/kumquat/VirtGpuKumquatBlobMapping.cpp b/guest/platform/kumquat/VirtGpuKumquatBlobMapping.cpp
new file mode 100644
index 0000000..e4869db
--- /dev/null
+++ b/guest/platform/kumquat/VirtGpuKumquatBlobMapping.cpp
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "VirtGpuKumquat.h"
+
+VirtGpuKumquatResourceMapping::VirtGpuKumquatResourceMapping(VirtGpuResourcePtr blob, uint8_t* ptr,
+ uint64_t size)
+ : mBlob(blob), mPtr(ptr), mSize(size) {}
+
+VirtGpuKumquatResourceMapping::~VirtGpuKumquatResourceMapping(void) { return; }
+
+uint8_t* VirtGpuKumquatResourceMapping::asRawPtr(void) { return mPtr; }
diff --git a/guest/platform/kumquat/VirtGpuKumquatDevice.cpp b/guest/platform/kumquat/VirtGpuKumquatDevice.cpp
new file mode 100644
index 0000000..7de219f
--- /dev/null
+++ b/guest/platform/kumquat/VirtGpuKumquatDevice.cpp
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cutils/log.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <cerrno>
+#include <cstring>
+#include <fstream>
+#include <string>
+
+#include "VirtGpuKumquat.h"
+#include "virtgpu_gfxstream_protocol.h"
+#include "virtgpu_kumquat/virtgpu_kumquat_ffi.h"
+
+#define PARAM(x) \
+ (struct VirtGpuParam) { x, #x, 0 }
+
+static inline uint32_t align_up(uint32_t n, uint32_t a) { return ((n + a - 1) / a) * a; }
+
+VirtGpuKumquatDevice::VirtGpuKumquatDevice(enum VirtGpuCapset capset, int fd)
+ : VirtGpuDevice(capset) {
+ struct VirtGpuParam params[] = {
+ PARAM(VIRTGPU_KUMQUAT_PARAM_3D_FEATURES),
+ PARAM(VIRTGPU_KUMQUAT_PARAM_CAPSET_QUERY_FIX),
+ PARAM(VIRTGPU_KUMQUAT_PARAM_RESOURCE_BLOB),
+ PARAM(VIRTGPU_KUMQUAT_PARAM_HOST_VISIBLE),
+ PARAM(VIRTGPU_KUMQUAT_PARAM_CROSS_DEVICE),
+ PARAM(VIRTGPU_KUMQUAT_PARAM_CONTEXT_INIT),
+ PARAM(VIRTGPU_KUMQUAT_PARAM_SUPPORTED_CAPSET_IDs),
+ PARAM(VIRTGPU_KUMQUAT_PARAM_EXPLICIT_DEBUG_NAME),
+ PARAM(VIRTGPU_KUMQUAT_PARAM_CREATE_GUEST_HANDLE),
+ };
+
+ int ret;
+ struct drm_kumquat_get_caps get_caps = {0};
+ struct drm_kumquat_context_init init = {0};
+ struct drm_kumquat_context_set_param ctx_set_params[3] = {{0}};
+ const char* processName = nullptr;
+
+ memset(&mCaps, 0, sizeof(struct VirtGpuCaps));
+
+#ifdef __ANDROID__
+ processName = getprogname();
+#endif
+
+ ret = virtgpu_kumquat_init(&mVirtGpu);
+ if (ret) {
+ ALOGV("Failed to init virtgpu kumquat");
+ return;
+ }
+
+ for (uint32_t i = 0; i < kParamMax; i++) {
+ struct drm_kumquat_getparam get_param = {0};
+ get_param.param = params[i].param;
+ get_param.value = (uint64_t)(uintptr_t)¶ms[i].value;
+
+ ret = virtgpu_kumquat_get_param(mVirtGpu, &get_param);
+ if (ret) {
+ ALOGV("virtgpu backend not enabling %s", params[i].name);
+ continue;
+ }
+
+ mCaps.params[i] = params[i].value;
+ }
+
+ get_caps.cap_set_id = static_cast<uint32_t>(capset);
+ switch (capset) {
+ case kCapsetGfxStreamVulkan:
+ get_caps.size = sizeof(struct vulkanCapset);
+ get_caps.addr = (unsigned long long)&mCaps.vulkanCapset;
+ break;
+ case kCapsetGfxStreamMagma:
+ get_caps.size = sizeof(struct magmaCapset);
+ get_caps.addr = (unsigned long long)&mCaps.magmaCapset;
+ break;
+ case kCapsetGfxStreamGles:
+ get_caps.size = sizeof(struct vulkanCapset);
+ get_caps.addr = (unsigned long long)&mCaps.glesCapset;
+ break;
+ case kCapsetGfxStreamComposer:
+ get_caps.size = sizeof(struct vulkanCapset);
+ get_caps.addr = (unsigned long long)&mCaps.composerCapset;
+ break;
+ default:
+ get_caps.size = 0;
+ }
+
+ ret = virtgpu_kumquat_get_caps(mVirtGpu, &get_caps);
+ if (ret) {
+        // Don't fail on a get-capabilities error just yet; AEMU doesn't use
+        // this API yet (b/272121235).
+ ALOGE("DRM_IOCTL_VIRTGPU_KUMQUAT_GET_CAPS failed with %s", strerror(errno));
+ }
+
+    // An ASG blob may be needed in some cases, so always define blobAlignment.
+ if (!mCaps.vulkanCapset.blobAlignment) {
+ mCaps.vulkanCapset.blobAlignment = 4096;
+ }
+
+ ctx_set_params[0].param = VIRTGPU_KUMQUAT_CONTEXT_PARAM_NUM_RINGS;
+ ctx_set_params[0].value = 2;
+ init.num_params = 1;
+
+ if (capset != kCapsetNone) {
+ ctx_set_params[init.num_params].param = VIRTGPU_KUMQUAT_CONTEXT_PARAM_CAPSET_ID;
+ ctx_set_params[init.num_params].value = static_cast<uint32_t>(capset);
+ init.num_params++;
+ }
+
+ if (mCaps.params[kParamExplicitDebugName] && processName) {
+ ctx_set_params[init.num_params].param = VIRTGPU_KUMQUAT_CONTEXT_PARAM_DEBUG_NAME;
+ ctx_set_params[init.num_params].value = reinterpret_cast<uint64_t>(processName);
+ init.num_params++;
+ }
+
+ init.ctx_set_params = (unsigned long long)&ctx_set_params[0];
+ ret = virtgpu_kumquat_context_init(mVirtGpu, &init);
+ if (ret) {
+ ALOGE(
+ "DRM_IOCTL_VIRTGPU_KUMQUAT_CONTEXT_INIT failed with %s, continuing without context...",
+ strerror(errno));
+ }
+}
+
+VirtGpuKumquatDevice::~VirtGpuKumquatDevice() { virtgpu_kumquat_finish(&mVirtGpu); }
+
+struct VirtGpuCaps VirtGpuKumquatDevice::getCaps(void) { return mCaps; }
+
+int64_t VirtGpuKumquatDevice::getDeviceHandle(void) { return -1; }
+
+VirtGpuResourcePtr VirtGpuKumquatDevice::createResource(uint32_t width, uint32_t height,
+ uint32_t virglFormat, uint32_t target,
+ uint32_t bind, uint32_t bpp) {
+ struct drm_kumquat_resource_create_3d create = {
+ .target = target,
+ .format = virglFormat,
+ .bind = bind,
+ .width = width,
+ .height = height,
+ .depth = 1U,
+ .array_size = 1U,
+ .last_level = 0,
+ .nr_samples = 0,
+ .size = width * height * bpp,
+ .stride = width * bpp,
+ };
+
+ int ret = virtgpu_kumquat_resource_create_3d(mVirtGpu, &create);
+ if (ret) {
+ ALOGE("DRM_IOCTL_VIRTGPU_KUMQUAT_RESOURCE_CREATE failed with %s", strerror(errno));
+ return nullptr;
+ }
+
+ return std::make_shared<VirtGpuKumquatResource>(mVirtGpu, create.bo_handle, create.res_handle,
+ static_cast<uint64_t>(create.size));
+}
+
+VirtGpuResourcePtr VirtGpuKumquatDevice::createBlob(const struct VirtGpuCreateBlob& blobCreate) {
+ int ret;
+ struct drm_kumquat_resource_create_blob create = {0};
+
+ create.size = blobCreate.size;
+ create.blob_mem = blobCreate.blobMem;
+ create.blob_flags = blobCreate.flags;
+ create.blob_id = blobCreate.blobId;
+ create.cmd = (uint64_t)(uintptr_t)blobCreate.blobCmd;
+ create.cmd_size = blobCreate.blobCmdSize;
+
+ ret = virtgpu_kumquat_resource_create_blob(mVirtGpu, &create);
+ if (ret < 0) {
+ ALOGE("DRM_VIRTGPU_KUMQUAT_RESOURCE_CREATE_BLOB failed with %s", strerror(errno));
+ return nullptr;
+ }
+
+ return std::make_shared<VirtGpuKumquatResource>(mVirtGpu, create.bo_handle, create.res_handle,
+ blobCreate.size);
+}
+
+VirtGpuResourcePtr VirtGpuKumquatDevice::importBlob(const struct VirtGpuExternalHandle& handle) {
+ int ret;
+ struct drm_kumquat_resource_import resource_import = {0};
+
+ resource_import.os_handle = static_cast<uint64_t>(handle.osHandle);
+ resource_import.handle_type = static_cast<uint32_t>(handle.type);
+
+ ret = virtgpu_kumquat_resource_import(mVirtGpu, &resource_import);
+ if (ret < 0) {
+ ALOGE("DRM_VIRTGPU_KUMQUAT_RESOURCE_IMPORT failed with %s", strerror(errno));
+ return nullptr;
+ }
+
+ return std::make_shared<VirtGpuKumquatResource>(
+ mVirtGpu, resource_import.bo_handle, resource_import.res_handle, resource_import.size);
+ return nullptr;
+}
+
+int VirtGpuKumquatDevice::execBuffer(struct VirtGpuExecBuffer& execbuffer,
+ const VirtGpuResource* blob) {
+ int ret;
+ struct drm_kumquat_execbuffer exec = {0};
+ uint32_t blobHandle;
+
+ exec.flags = execbuffer.flags;
+ exec.size = execbuffer.command_size;
+ exec.ring_idx = execbuffer.ring_idx;
+ exec.command = (uint64_t)(uintptr_t)(execbuffer.command);
+ exec.fence_fd = -1;
+
+ if (blob) {
+ blobHandle = blob->getBlobHandle();
+ exec.bo_handles = (uint64_t)(uintptr_t)(&blobHandle);
+ exec.num_bo_handles = 1;
+ }
+
+ ret = virtgpu_kumquat_execbuffer(mVirtGpu, &exec);
+ if (ret) {
+ ALOGE("DRM_IOCTL_VIRTGPU_KUMQUAT_EXECBUFFER failed: %s", strerror(errno));
+ return ret;
+ }
+
+ if (execbuffer.flags & kFenceOut) {
+ execbuffer.handle.osHandle = exec.fence_fd;
+ execbuffer.handle.type = kFenceHandleSyncFd;
+ }
+
+ return 0;
+}
+
+VirtGpuDevice* createPlatformVirtGpuDevice(enum VirtGpuCapset capset, int fd) {
+ return new VirtGpuKumquatDevice(capset, fd);
+}
diff --git a/guest/platform/kumquat/VirtGpuKumquatSync.cpp b/guest/platform/kumquat/VirtGpuKumquatSync.cpp
new file mode 100644
index 0000000..d152bff
--- /dev/null
+++ b/guest/platform/kumquat/VirtGpuKumquatSync.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "VirtGpuKumquatSync.h"
+
+#include <unistd.h>
+
+namespace gfxstream {
+
+VirtGpuKumquatSyncHelper::VirtGpuKumquatSyncHelper() {}
+
+int VirtGpuKumquatSyncHelper::wait(int syncFd, int timeoutMilliseconds) {
+ (void)syncFd;
+ (void)timeoutMilliseconds;
+ return -1;
+}
+
+int VirtGpuKumquatSyncHelper::dup(int syncFd) { return ::dup(syncFd); }
+
+int VirtGpuKumquatSyncHelper::close(int syncFd) { return ::close(syncFd); }
+
+SyncHelper* createPlatformSyncHelper() { return new VirtGpuKumquatSyncHelper(); }
+
+} // namespace gfxstream
diff --git a/guest/platform/kumquat/VirtGpuKumquatSync.h b/guest/platform/kumquat/VirtGpuKumquatSync.h
new file mode 100644
index 0000000..c8b89e6
--- /dev/null
+++ b/guest/platform/kumquat/VirtGpuKumquatSync.h
@@ -0,0 +1,32 @@
+// Copyright 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "Sync.h"
+
+namespace gfxstream {
+
+class VirtGpuKumquatSyncHelper : public SyncHelper {
+ public:
+ VirtGpuKumquatSyncHelper();
+
+ int wait(int syncFd, int timeoutMilliseconds) override;
+
+ int dup(int syncFd) override;
+
+ int close(int syncFd) override;
+};
+
+} // namespace gfxstream
diff --git a/guest/platform/kumquat/meson.build b/guest/platform/kumquat/meson.build
new file mode 100644
index 0000000..bd1b4f9
--- /dev/null
+++ b/guest/platform/kumquat/meson.build
@@ -0,0 +1,18 @@
+# Copyright 2022 Android Open Source Project
+# SPDX-License-Identifier: MIT
+
+files_lib_platform = files(
+ '../VirtGpu.cpp',
+ 'VirtGpuKumquatDevice.cpp',
+ 'VirtGpuKumquatBlobMapping.cpp',
+ 'VirtGpuKumquatBlob.cpp',
+ 'VirtGpuKumquatSync.cpp',
+)
+
+lib_platform = static_library(
+ 'platform',
+ files_lib_platform,
+ cpp_args: gfxstream_guest_args,
+ include_directories: [inc_platform, inc_android_compat],
+ dependencies: virtgpu_kumquat_dep,
+)
diff --git a/guest/platform/meson.build b/guest/platform/meson.build
index 0561499..ba0f6d6 100644
--- a/guest/platform/meson.build
+++ b/guest/platform/meson.build
@@ -3,7 +3,7 @@
inc_platform = include_directories('include')
if with_guest_test
- subdir('rutabaga')
+ subdir('kumquat')
else
subdir('linux')
subdir('stub')
diff --git a/guest/platform/rutabaga/meson.build b/guest/platform/rutabaga/meson.build
deleted file mode 100644
index 112d216..0000000
--- a/guest/platform/rutabaga/meson.build
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright 2022 Android Open Source Project
-# SPDX-License-Identifier: MIT
-
-files_lib_platform = files(
- '../VirtGpu.cpp',
- 'RutabagaLayer.cpp',
- 'RutabagaVirtGpuBlob.cpp',
- 'RutabagaVirtGpuBlobMapping.cpp',
- 'RutabagaVirtGpuSyncHelper.cpp',
- 'RutabagaVirtGpuDevice.cpp',
-)
-
-lib_platform = static_library(
- 'platform',
- files_lib_platform,
- cpp_args: gfxstream_guest_args,
- include_directories: [inc_platform, inc_android_compat],
- link_with: [lib_android_compat],
- dependencies: [drm_dep, rutabaga_gfx_ffi_dep],
-)
diff --git a/guest/vulkan_enc/ResourceTracker.cpp b/guest/vulkan_enc/ResourceTracker.cpp
index 055f566..b663921 100644
--- a/guest/vulkan_enc/ResourceTracker.cpp
+++ b/guest/vulkan_enc/ResourceTracker.cpp
@@ -3756,7 +3756,7 @@
}
#endif
- VirtGpuResourcePtr colorBufferBlob = nullptr;
+ VirtGpuResourcePtr bufferBlob = nullptr;
#if defined(LINUX_GUEST_BUILD)
if (exportDmabuf) {
VirtGpuDevice* instance = VirtGpuDevice::getInstance();
@@ -3806,16 +3806,57 @@
if (VK_IMAGE_TILING_LINEAR == imageCreateInfo.tiling) {
bind |= VIRGL_BIND_LINEAR;
}
- colorBufferBlob = instance->createResource(
- imageCreateInfo.extent.width, imageCreateInfo.extent.height,
- subResourceLayout.rowPitch, virglFormat, target, bind);
- if (!colorBufferBlob) {
- mesa_loge("Failed to create colorBuffer resource for Image memory");
- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
- }
- if (!colorBufferBlob->wait()) {
- mesa_loge("Failed to wait for colorBuffer resource for Image memory");
- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
+ if (mCaps.vulkanCapset.alwaysBlob) {
+ struct gfxstreamResourceCreate3d create3d = {};
+ struct VirtGpuExecBuffer exec = {};
+ struct gfxstreamPlaceholderCommandVk placeholderCmd = {};
+ struct VirtGpuCreateBlob createBlob = {};
+
+ create3d.hdr.opCode = GFXSTREAM_RESOURCE_CREATE_3D;
+ create3d.bind = bind;
+ create3d.target = target;
+ create3d.format = virglFormat;
+ create3d.width = imageCreateInfo.extent.width;
+ create3d.height = imageCreateInfo.extent.height;
+ create3d.blobId = ++mBlobId;
+
+ createBlob.blobCmd = reinterpret_cast<uint8_t*>(&create3d);
+ createBlob.blobCmdSize = sizeof(create3d);
+ createBlob.blobMem = kBlobMemHost3d;
+ createBlob.flags = kBlobFlagShareable | kBlobFlagCrossDevice;
+ createBlob.blobId = create3d.blobId;
+ createBlob.size = finalAllocInfo.allocationSize;
+
+ bufferBlob = instance->createBlob(createBlob);
+ if (!bufferBlob) return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
+ placeholderCmd.hdr.opCode = GFXSTREAM_PLACEHOLDER_COMMAND_VK;
+ exec.command = static_cast<void*>(&placeholderCmd);
+ exec.command_size = sizeof(placeholderCmd);
+ exec.flags = kRingIdx;
+ exec.ring_idx = 1;
+ if (instance->execBuffer(exec, bufferBlob.get())) {
+ mesa_loge("Failed to execbuffer placeholder command.");
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+
+ if (bufferBlob->wait()) {
+ mesa_loge("Failed to wait for blob.");
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+ } else {
+ bufferBlob = instance->createResource(
+ imageCreateInfo.extent.width, imageCreateInfo.extent.height,
+ subResourceLayout.rowPitch, virglFormat, target, bind);
+ if (!bufferBlob) {
+ mesa_loge("Failed to create colorBuffer resource for Image memory");
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ }
+ if (!bufferBlob->wait()) {
+ mesa_loge("Failed to wait for colorBuffer resource for Image memory");
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ }
}
} else {
mesa_logw(
@@ -3824,9 +3865,59 @@
"(VkExportMemoryAllocateInfo).\n");
}
} else if (hasDedicatedBuffer) {
- mesa_logw(
- "VkDeviceMemory allocated with VkMemoryDedicatedAllocateInfo::buffer cannot be "
- "exported (VkExportMemoryAllocateInfo)");
+ uint32_t virglFormat = VIRGL_FORMAT_R8_UNORM;
+ const uint32_t target = PIPE_BUFFER;
+ uint32_t bind = VIRGL_BIND_LINEAR;
+ uint32_t width = finalAllocInfo.allocationSize;
+ uint32_t height = 1;
+
+ if (mCaps.vulkanCapset.alwaysBlob) {
+ struct gfxstreamResourceCreate3d create3d = {};
+ struct VirtGpuExecBuffer exec = {};
+ struct gfxstreamPlaceholderCommandVk placeholderCmd = {};
+ struct VirtGpuCreateBlob createBlob = {};
+
+ create3d.hdr.opCode = GFXSTREAM_RESOURCE_CREATE_3D;
+ create3d.bind = bind;
+ create3d.target = target;
+ create3d.format = virglFormat;
+ create3d.width = width;
+ create3d.height = height;
+ create3d.blobId = ++mBlobId;
+
+ createBlob.blobCmd = reinterpret_cast<uint8_t*>(&create3d);
+ createBlob.blobCmdSize = sizeof(create3d);
+ createBlob.blobMem = kBlobMemHost3d;
+ createBlob.flags = kBlobFlagShareable | kBlobFlagCrossDevice;
+ createBlob.blobId = create3d.blobId;
+ createBlob.size = width;
+
+ bufferBlob = instance->createBlob(createBlob);
+ if (!bufferBlob) return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
+ placeholderCmd.hdr.opCode = GFXSTREAM_PLACEHOLDER_COMMAND_VK;
+ exec.command = static_cast<void*>(&placeholderCmd);
+ exec.command_size = sizeof(placeholderCmd);
+ exec.flags = kRingIdx;
+ exec.ring_idx = 1;
+ if (instance->execBuffer(exec, bufferBlob.get())) {
+ mesa_loge("Failed to allocate coherent memory: failed to execbuffer for wait.");
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+
+ bufferBlob->wait();
+ } else {
+ bufferBlob =
+ instance->createResource(width, height, width, virglFormat, target, bind);
+ if (!bufferBlob) {
+ mesa_loge("Failed to create colorBuffer resource for Image memory");
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ }
+ if (!bufferBlob->wait()) {
+ mesa_loge("Failed to wait for colorBuffer resource for Image memory");
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ }
+ }
} else {
mesa_logw(
"VkDeviceMemory is not exportable (VkExportMemoryAllocateInfo). Requires "
@@ -3840,20 +3931,25 @@
importHandle.type = kMemHandleDmabuf;
auto instance = VirtGpuDevice::getInstance();
- colorBufferBlob = instance->importBlob(importHandle);
- if (!colorBufferBlob) {
+ bufferBlob = instance->importBlob(importHandle);
+ if (!bufferBlob) {
mesa_loge("%s: Failed to import colorBuffer resource\n", __func__);
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
}
- if (colorBufferBlob) {
- importCbInfo.colorBuffer = colorBufferBlob->getResourceHandle();
- vk_append_struct(&structChainIter, &importCbInfo);
+ if (bufferBlob) {
+ if (hasDedicatedBuffer) {
+ importBufferInfo.buffer = bufferBlob->getResourceHandle();
+ vk_append_struct(&structChainIter, &importBufferInfo);
+ } else {
+ importCbInfo.colorBuffer = bufferBlob->getResourceHandle();
+ vk_append_struct(&structChainIter, &importCbInfo);
+ }
}
#endif
- if (ahw || colorBufferBlob || !requestedMemoryIsHostVisible) {
+ if (ahw || bufferBlob || !requestedMemoryIsHostVisible) {
input_result =
enc->vkAllocateMemory(device, &finalAllocInfo, pAllocator, pMemory, true /* do lock */);
@@ -3861,7 +3957,7 @@
VkDeviceSize allocationSize = finalAllocInfo.allocationSize;
setDeviceMemoryInfo(device, *pMemory, 0, nullptr, finalAllocInfo.memoryTypeIndex, ahw,
- isImport, vmo_handle, colorBufferBlob);
+ isImport, vmo_handle, bufferBlob);
_RETURN_SCUCCESS_WITH_DEVICE_MEMORY_REPORT;
}
diff --git a/host/Buffer.cpp b/host/Buffer.cpp
index b60a1ec..84459db 100644
--- a/host/Buffer.cpp
+++ b/host/Buffer.cpp
@@ -138,4 +138,12 @@
return false;
}
+std::optional<ManagedDescriptorInfo> Buffer::exportBlob() {
+ if (!mBufferVk) {
+ return std::nullopt;
+ }
+
+ return mBufferVk->exportBlob();
+}
+
} // namespace gfxstream
diff --git a/host/Buffer.h b/host/Buffer.h
index 414ff05..cf2b15e 100644
--- a/host/Buffer.h
+++ b/host/Buffer.h
@@ -16,6 +16,7 @@
#include <memory>
+#include "BlobManager.h"
#include "Handle.h"
#include "aemu/base/files/Stream.h"
#include "snapshot/LazySnapshotObj.h"
@@ -57,6 +58,7 @@
void readToBytes(uint64_t offset, uint64_t size, void* outBytes);
bool updateFromBytes(uint64_t offset, uint64_t size, const void* bytes);
+ std::optional<ManagedDescriptorInfo> exportBlob();
private:
Buffer(HandleType handle, uint64_t size);
diff --git a/host/ColorBuffer.cpp b/host/ColorBuffer.cpp
index 1664680..159f8e2 100644
--- a/host/ColorBuffer.cpp
+++ b/host/ColorBuffer.cpp
@@ -455,6 +455,14 @@
return mColorBufferVk->waitSync();
}
+std::optional<ManagedDescriptorInfo> ColorBuffer::exportBlob() {
+ if (!mColorBufferVk) {
+ return std::nullopt;
+ }
+
+ return mColorBufferVk->exportBlob();
+}
+
#if GFXSTREAM_ENABLE_HOST_GLES
bool ColorBuffer::glOpBlitFromCurrentReadBuffer() {
if (!mColorBufferGl) {
diff --git a/host/ColorBuffer.h b/host/ColorBuffer.h
index 43a3330..ba6a9ad 100644
--- a/host/ColorBuffer.h
+++ b/host/ColorBuffer.h
@@ -16,6 +16,7 @@
#include <memory>
+#include "BlobManager.h"
#include "BorrowedImage.h"
#include "FrameworkFormats.h"
#include "Handle.h"
@@ -94,6 +95,7 @@
bool importNativeResource(void* nativeResource, uint32_t type, bool preserveContent);
int waitSync();
+ std::optional<ManagedDescriptorInfo> exportBlob();
#if GFXSTREAM_ENABLE_HOST_GLES
GLuint glOpGetTexture();
diff --git a/host/FrameBuffer.cpp b/host/FrameBuffer.cpp
index b44b5ea..cd02bab 100644
--- a/host/FrameBuffer.cpp
+++ b/host/FrameBuffer.cpp
@@ -339,7 +339,7 @@
#if GFXSTREAM_ENABLE_HOST_GLES
// Do not initialize GL emulation if the guest is using ANGLE.
- if (!fb->m_features.GuestUsesAngle.enabled) {
+ if (!fb->m_features.GuestVulkanOnly.enabled) {
fb->m_emulationGl = EmulationGl::create(width, height, fb->m_features, useSubWindow, egl2egl);
if (!fb->m_emulationGl) {
ERR("Failed to initialize GL emulation.");
@@ -348,9 +348,7 @@
}
#endif
- fb->m_guestUsesAngle = fb->m_features.GuestUsesAngle.enabled;
-
- fb->m_useVulkanComposition = fb->m_features.GuestUsesAngle.enabled ||
+ fb->m_useVulkanComposition = fb->m_features.GuestVulkanOnly.enabled ||
fb->m_features.VulkanNativeSwapchain.enabled;
std::unique_ptr<VkEmulationFeatures> vkEmulationFeatures =
@@ -369,7 +367,7 @@
.astcLdrEmulationMode = AstcEmulationMode::Gpu,
.enableEtc2Emulation = true,
.enableYcbcrEmulation = false,
- .guestUsesAngle = fb->m_guestUsesAngle,
+ .guestVulkanOnly = fb->m_features.GuestVulkanOnly.enabled,
.useDedicatedAllocations = false, // Set later.
});
@@ -378,7 +376,7 @@
// current-context when asked for them.
//
bool useVulkanGraphicsDiagInfo =
- vkEmu && fb->m_features.VulkanNativeSwapchain.enabled && fb->m_guestUsesAngle;
+ vkEmu && fb->m_features.VulkanNativeSwapchain.enabled && fb->m_features.GuestVulkanOnly.enabled;
if (useVulkanGraphicsDiagInfo) {
fb->m_graphicsAdapterVendor = vkEmu->deviceInfo.driverVendor;
@@ -1706,7 +1704,7 @@
bool FrameBuffer::post(HandleType p_colorbuffer, bool needLockAndBind) {
#if GFXSTREAM_ENABLE_HOST_GLES
- if (m_guestUsesAngle) {
+ if (m_features.GuestVulkanOnly.enabled) {
flushColorBufferFromGl(p_colorbuffer);
}
#endif
@@ -1719,7 +1717,7 @@
void FrameBuffer::postWithCallback(HandleType p_colorbuffer, Post::CompletionCallback callback,
bool needLockAndBind) {
#if GFXSTREAM_ENABLE_HOST_GLES
- if (m_guestUsesAngle) {
+ if (m_features.GuestVulkanOnly.enabled) {
flushColorBufferFromGl(p_colorbuffer);
}
#endif
@@ -2982,6 +2980,28 @@
return colorBuffer->waitSync();
}
+std::optional<ManagedDescriptorInfo> FrameBuffer::exportColorBuffer(HandleType colorBufferHandle) {
+ AutoLock mutex(m_lock);
+
+ ColorBufferPtr colorBuffer = findColorBuffer(colorBufferHandle);
+ if (!colorBuffer) {
+ return std::nullopt;
+ }
+
+ return colorBuffer->exportBlob();
+}
+
+std::optional<ManagedDescriptorInfo> FrameBuffer::exportBuffer(HandleType bufferHandle) {
+ AutoLock mutex(m_lock);
+
+ BufferPtr buffer = findBuffer(bufferHandle);
+ if (!buffer) {
+ return std::nullopt;
+ }
+
+ return buffer->exportBlob();
+}
+
#if GFXSTREAM_ENABLE_HOST_GLES
HandleType FrameBuffer::getEmulatedEglWindowSurfaceColorBufferHandle(HandleType p_surface) {
AutoLock mutex(m_lock);
diff --git a/host/FrameBuffer.h b/host/FrameBuffer.h
index 9e7f58a..52104df 100644
--- a/host/FrameBuffer.h
+++ b/host/FrameBuffer.h
@@ -26,6 +26,7 @@
#include <unordered_map>
#include <unordered_set>
+#include "BlobManager.h"
#include "Buffer.h"
#include "ColorBuffer.h"
#include "Compositor.h"
@@ -498,6 +499,8 @@
bool invalidateColorBufferForVk(HandleType colorBufferHandle);
int waitSyncColorBuffer(HandleType colorBufferHandle);
+ std::optional<ManagedDescriptorInfo> exportColorBuffer(HandleType colorBufferHandle);
+ std::optional<ManagedDescriptorInfo> exportBuffer(HandleType bufferHandle);
#if GFXSTREAM_ENABLE_HOST_GLES
// Retrieves the color buffer handle associated with |p_surface|.
@@ -845,7 +848,6 @@
bool m_vulkanInteropSupported = false;
bool m_vulkanEnabled = false;
- bool m_guestUsesAngle = false;
// Whether the guest manages ColorBuffer lifetime
// so we don't need refcounting on the host side.
bool m_guestManagedColorBufferLifetime = false;
diff --git a/host/RenderThread.cpp b/host/RenderThread.cpp
index 2f5aa3a..8a83803 100644
--- a/host/RenderThread.cpp
+++ b/host/RenderThread.cpp
@@ -273,7 +273,7 @@
//
// initialize decoders
#if GFXSTREAM_ENABLE_HOST_GLES
- if (!FrameBuffer::getFB()->getFeatures().GuestUsesAngle.enabled) {
+ if (!FrameBuffer::getFB()->getFeatures().GuestVulkanOnly.enabled) {
tInfo.initGl();
}
diff --git a/host/features/include/gfxstream/host/Features.h b/host/features/include/gfxstream/host/Features.h
index e6a7024..c17fac4 100644
--- a/host/features/include/gfxstream/host/Features.h
+++ b/host/features/include/gfxstream/host/Features.h
@@ -114,10 +114,12 @@
"a guest app may directly writing to gralloc buffers and posting.",
&map,
};
- FeatureInfo GuestUsesAngle = {
- "GuestUsesAngle",
- "If enabled, indicates that the guest will not use GL and the host will not "
- "enable the GL backend.",
+ FeatureInfo GuestVulkanOnly = {
+ "GuestVulkanOnly",
+ "If enabled, indicates that the guest only requires Vulkan translation. "
+ "The guest will not use GL and the host will not enable the GL backend. "
+ "This is the case when the guest uses libraries such as ANGLE or Zink for "
+ "GL to Vulkan translation.",
&map,
};
FeatureInfo HasSharedSlotsHostMemoryAllocator = {
diff --git a/host/virtgpu_gfxstream_protocol.h b/host/virtgpu_gfxstream_protocol.h
index 823f300..19827ea 100644
--- a/host/virtgpu_gfxstream_protocol.h
+++ b/host/virtgpu_gfxstream_protocol.h
@@ -39,10 +39,11 @@
#define GFXSTREAM_CREATE_EXPORT_SYNC 0x9000
#define GFXSTREAM_CREATE_IMPORT_SYNC 0x9001
-// Vulkan Sync
+// Vulkan related
#define GFXSTREAM_CREATE_EXPORT_SYNC_VK 0xa000
#define GFXSTREAM_CREATE_IMPORT_SYNC_VK 0xa001
#define GFXSTREAM_CREATE_QSRI_EXPORT_VK 0xa002
+#define GFXSTREAM_RESOURCE_CREATE_3D 0xa003
// clang-format off
// A placeholder command to ensure virtio-gpu completes
@@ -89,6 +90,22 @@
uint32_t padding;
};
+struct gfxstreamResourceCreate3d {
+ struct gfxstreamHeader hdr;
+ uint32_t target;
+ uint32_t format;
+ uint32_t bind;
+ uint32_t width;
+ uint32_t height;
+ uint32_t depth;
+ uint32_t arraySize;
+ uint32_t lastLevel;
+ uint32_t nrSamples;
+ uint32_t flags;
+ uint32_t pad;
+ uint64_t blobId;
+};
+
struct vulkanCapset {
uint32_t protocolVersion;
@@ -100,7 +117,8 @@
uint32_t deferredMapping;
uint32_t blobAlignment;
uint32_t noRenderControlEnc;
- uint32_t padding[14];
+ uint32_t alwaysBlob;
+ uint32_t padding[13];
};
struct magmaCapset {
diff --git a/host/virtio-gpu-gfxstream-renderer.cpp b/host/virtio-gpu-gfxstream-renderer.cpp
index e572a90..89ad5cc 100644
--- a/host/virtio-gpu-gfxstream-renderer.cpp
+++ b/host/virtio-gpu-gfxstream-renderer.cpp
@@ -242,6 +242,7 @@
uint32_t addressSpaceHandle;
bool hasAddressSpaceHandle;
std::unordered_map<VirtioGpuResId, uint32_t> addressSpaceHandles;
+ std::unordered_map<uint32_t, struct stream_renderer_resource_create_args> blobMap;
};
enum class ResType {
@@ -252,6 +253,8 @@
BUFFER,
// Used as a GPU texture.
COLOR_BUFFER,
+ // Used as a blob and not known to FrameBuffer.
+ BLOB,
};
struct AlignedMemory {
@@ -782,6 +785,7 @@
return -EINVAL;
}
std::unordered_map<uint32_t, uint32_t> map;
+ std::unordered_map<uint32_t, struct stream_renderer_resource_create_args> blobMap;
PipeCtxEntry res = {
std::move(contextName), // contextName
@@ -792,6 +796,7 @@
0, // AS handle
false, // does not have an AS handle
map, // resourceId --> ASG handle map
+ blobMap, // blobId -> resource create args
};
stream_renderer_debug("initial host pipe for ctxid %u: %p", ctx_id, hostPipe);
@@ -1023,6 +1028,36 @@
[this, taskId] { mVirtioGpuTimelines->notifyTaskCompletion(taskId); });
break;
}
+ case GFXSTREAM_RESOURCE_CREATE_3D: {
+ DECODE(create3d, gfxstream::gfxstreamResourceCreate3d, buffer)
+ struct stream_renderer_resource_create_args rc3d = {0};
+
+ rc3d.target = create3d.target;
+ rc3d.format = create3d.format;
+ rc3d.bind = create3d.bind;
+ rc3d.width = create3d.width;
+ rc3d.height = create3d.height;
+ rc3d.depth = create3d.depth;
+ rc3d.array_size = create3d.arraySize;
+ rc3d.last_level = create3d.lastLevel;
+ rc3d.nr_samples = create3d.nrSamples;
+ rc3d.flags = create3d.flags;
+
+ auto ctxIt = mContexts.find(cmd->ctx_id);
+ if (ctxIt == mContexts.end()) {
+ stream_renderer_error("ctx id %u is not found", cmd->ctx_id);
+ return -EINVAL;
+ }
+
+ auto& ctxEntry = ctxIt->second;
+ if (ctxEntry.blobMap.count(create3d.blobId)) {
+ stream_renderer_error("blob ID already in use");
+ return -EINVAL;
+ }
+
+ ctxEntry.blobMap[create3d.blobId] = rc3d;
+ break;
+ }
case GFXSTREAM_PLACEHOLDER_COMMAND_VK: {
// Do nothing, this is a placeholder command
break;
@@ -1165,6 +1200,8 @@
const auto resType = getResourceType(*args);
switch (resType) {
+ case ResType::BLOB:
+ return -EINVAL;
case ResType::PIPE:
break;
case ResType::BUFFER:
@@ -1207,6 +1244,7 @@
auto& entry = it->second;
switch (entry.type) {
+ case ResType::BLOB:
case ResType::PIPE:
break;
case ResType::BUFFER:
@@ -1434,6 +1472,8 @@
auto& entry = it->second;
switch (entry.type) {
+ case ResType::BLOB:
+ return -EINVAL;
case ResType::PIPE:
ret = handleTransferReadPipe(&entry, offset, box);
break;
@@ -1483,6 +1523,8 @@
}
switch (entry.type) {
+ case ResType::BLOB:
+ return -EINVAL;
case ResType::PIPE:
ret = handleTransferWritePipe(&entry, offset, box);
break;
@@ -1539,6 +1581,10 @@
if (vk_emu && vk_emu->live) {
capset->deferredMapping = 1;
}
+
+#if SUPPORT_DMABUF
+ capset->alwaysBlob = 1;
+#endif
break;
}
case VIRTGPU_CAPSET_GFXSTREAM_MAGMA: {
@@ -1717,9 +1763,48 @@
PipeResEntry e;
struct stream_renderer_resource_create_args args = {0};
+ std::optional<ManagedDescriptorInfo> descriptorInfoOpt = std::nullopt;
e.args = args;
e.hostPipe = 0;
+ auto ctxIt = mContexts.find(ctx_id);
+ if (ctxIt == mContexts.end()) {
+ stream_renderer_error("ctx id %u is not found", ctx_id);
+ return -EINVAL;
+ }
+
+ auto& ctxEntry = ctxIt->second;
+
+ ResType blobType = ResType::BLOB;
+
+ auto blobIt = ctxEntry.blobMap.find(create_blob->blob_id);
+ if (blobIt != ctxEntry.blobMap.end()) {
+ auto& create3d = blobIt->second;
+ create3d.handle = res_handle;
+
+ const auto resType = getResourceType(create3d);
+ switch (resType) {
+ case ResType::BLOB:
+ return -EINVAL;
+ case ResType::PIPE:
+ // Fallthrough for pipe is intended for blob buffers.
+ case ResType::BUFFER:
+ blobType = ResType::BUFFER;
+ handleCreateResourceBuffer(&create3d);
+ descriptorInfoOpt = gfxstream::FrameBuffer::getFB()->exportBuffer(res_handle);
+ break;
+ case ResType::COLOR_BUFFER:
+ blobType = ResType::COLOR_BUFFER;
+ handleCreateResourceColorBuffer(&create3d);
+ descriptorInfoOpt =
+ gfxstream::FrameBuffer::getFB()->exportColorBuffer(res_handle);
+ break;
+ }
+
+ e.args = create3d;
+ ctxEntry.blobMap.erase(create_blob->blob_id);
+ }
+
if (create_blob->blob_id == 0) {
int ret = createRingBlob(e, res_handle, create_blob, handle);
if (ret) {
@@ -1739,8 +1824,11 @@
return -EINVAL;
#endif
} else {
- auto descriptorInfoOpt =
- BlobManager::get()->removeDescriptorInfo(ctx_id, create_blob->blob_id);
+ if (!descriptorInfoOpt) {
+ descriptorInfoOpt =
+ BlobManager::get()->removeDescriptorInfo(ctx_id, create_blob->blob_id);
+ }
+
if (descriptorInfoOpt) {
e.descriptorInfo =
std::make_shared<ManagedDescriptorInfo>(std::move(*descriptorInfoOpt));
@@ -1764,6 +1852,7 @@
e.blobId = create_blob->blob_id;
e.blobMem = create_blob->blob_mem;
e.blobFlags = create_blob->blob_flags;
+ e.type = blobType;
e.iov = nullptr;
e.numIovs = 0;
e.linear = 0;
@@ -2433,7 +2522,7 @@
GFXSTREAM_SET_FEATURE_ON_CONDITION(
&features, GlPipeChecksum, false);
GFXSTREAM_SET_FEATURE_ON_CONDITION(
- &features, GuestUsesAngle,
+ &features, GuestVulkanOnly,
(renderer_flags & STREAM_RENDERER_FLAGS_USE_VK_BIT) &&
!(renderer_flags & STREAM_RENDERER_FLAGS_USE_GLES_BIT));
GFXSTREAM_SET_FEATURE_ON_CONDITION(
diff --git a/host/vulkan/BufferVk.cpp b/host/vulkan/BufferVk.cpp
index 46f4540..a8ec4f3 100644
--- a/host/vulkan/BufferVk.cpp
+++ b/host/vulkan/BufferVk.cpp
@@ -45,5 +45,21 @@
return updateBufferFromBytes(mHandle, offset, size, bytes);
}
+std::optional<ManagedDescriptorInfo> BufferVk::exportBlob() {
+ uint32_t streamHandleType = 0;
+ auto vkHandle = getBufferExtMemoryHandle(mHandle, &streamHandleType);
+ if (vkHandle != VK_EXT_MEMORY_HANDLE_INVALID) {
+ ManagedDescriptor descriptor(dupExternalMemory(vkHandle));
+ return ManagedDescriptorInfo{
+ .descriptor = std::move(descriptor),
+ .handleType = streamHandleType,
+ .caching = 0,
+ .vulkanInfoOpt = std::nullopt,
+ };
+ } else {
+ return std::nullopt;
+ }
+}
+
} // namespace vk
-} // namespace gfxstream
\ No newline at end of file
+} // namespace gfxstream
diff --git a/host/vulkan/BufferVk.h b/host/vulkan/BufferVk.h
index 9016fd7..1df6b55 100644
--- a/host/vulkan/BufferVk.h
+++ b/host/vulkan/BufferVk.h
@@ -15,6 +15,8 @@
#include <memory>
#include <vector>
+#include "BlobManager.h"
+
namespace gfxstream {
namespace vk {
@@ -28,6 +30,8 @@
bool updateFromBytes(uint64_t offset, uint64_t size, const void* bytes);
+ std::optional<ManagedDescriptorInfo> exportBlob();
+
private:
BufferVk(uint32_t handle);
diff --git a/host/vulkan/ColorBufferVk.cpp b/host/vulkan/ColorBufferVk.cpp
index 9461510..cd82b15 100644
--- a/host/vulkan/ColorBufferVk.cpp
+++ b/host/vulkan/ColorBufferVk.cpp
@@ -80,5 +80,19 @@
int ColorBufferVk::waitSync() { return waitSyncVkColorBuffer(mHandle); }
+std::optional<ManagedDescriptorInfo> ColorBufferVk::exportBlob() {
+ auto info = exportColorBufferMemory(mHandle);
+ if (info) {
+ return ManagedDescriptorInfo{
+ .descriptor = std::move((*info).descriptor),
+ .handleType = (*info).streamHandleType,
+ .caching = 0,
+ .vulkanInfoOpt = std::nullopt,
+ };
+ } else {
+ return std::nullopt;
+ }
+}
+
} // namespace vk
} // namespace gfxstream
diff --git a/host/vulkan/ColorBufferVk.h b/host/vulkan/ColorBufferVk.h
index 81e1e42..e7187b9 100644
--- a/host/vulkan/ColorBufferVk.h
+++ b/host/vulkan/ColorBufferVk.h
@@ -17,6 +17,7 @@
#include <memory>
#include <vector>
+#include "BlobManager.h"
#include "FrameworkFormats.h"
#include "aemu/base/files/Stream.h"
@@ -43,6 +44,7 @@
void onSave(android::base::Stream* stream);
int waitSync();
+ std::optional<ManagedDescriptorInfo> exportBlob();
private:
ColorBufferVk(uint32_t handle);
diff --git a/host/vulkan/VkAndroidNativeBuffer.cpp b/host/vulkan/VkAndroidNativeBuffer.cpp
index a3601c1..ce1563f 100644
--- a/host/vulkan/VkAndroidNativeBuffer.cpp
+++ b/host/vulkan/VkAndroidNativeBuffer.cpp
@@ -153,7 +153,7 @@
}
out->useVulkanNativeImage =
- (emu && emu->live && emu->guestUsesAngle) || colorBufferExportedToGl;
+ (emu && emu->live && emu->guestVulkanOnly) || colorBufferExportedToGl;
VkDeviceSize bindOffset = 0;
if (out->externallyBacked) {
diff --git a/host/vulkan/VkCommonOperations.cpp b/host/vulkan/VkCommonOperations.cpp
index 9643bc3..05851be 100644
--- a/host/vulkan/VkCommonOperations.cpp
+++ b/host/vulkan/VkCommonOperations.cpp
@@ -1369,7 +1369,7 @@
INFO(" ASTC LDR emulation mode: %d", features->astcLdrEmulationMode);
INFO(" enable ETC2 emulation: %s", features->enableEtc2Emulation ? "true" : "false");
INFO(" enable Ycbcr emulation: %s", features->enableYcbcrEmulation ? "true" : "false");
- INFO(" guestUsesAngle: %s", features->guestUsesAngle ? "true" : "false");
+ INFO(" guestVulkanOnly: %s", features->guestVulkanOnly ? "true" : "false");
INFO(" useDedicatedAllocations: %s", features->useDedicatedAllocations ? "true" : "false");
sVkEmulation->deviceInfo.glInteropSupported = features->glInteropSupported;
sVkEmulation->useDeferredCommands = features->deferredCommands;
@@ -1378,7 +1378,7 @@
sVkEmulation->astcLdrEmulationMode = features->astcLdrEmulationMode;
sVkEmulation->enableEtc2Emulation = features->enableEtc2Emulation;
sVkEmulation->enableYcbcrEmulation = features->enableYcbcrEmulation;
- sVkEmulation->guestUsesAngle = features->guestUsesAngle;
+ sVkEmulation->guestVulkanOnly = features->guestVulkanOnly;
sVkEmulation->useDedicatedAllocations = features->useDedicatedAllocations;
if (features->useVulkanComposition) {
diff --git a/host/vulkan/VkCommonOperations.h b/host/vulkan/VkCommonOperations.h
index 430233b..cb53bf2 100644
--- a/host/vulkan/VkCommonOperations.h
+++ b/host/vulkan/VkCommonOperations.h
@@ -125,7 +125,7 @@
// conversion or not.
bool enableYcbcrEmulation = false;
- bool guestUsesAngle = false;
+ bool guestVulkanOnly = false;
bool useDedicatedAllocations = false;
@@ -445,7 +445,7 @@
AstcEmulationMode astcLdrEmulationMode = AstcEmulationMode::Disabled;
bool enableEtc2Emulation = false;
bool enableYcbcrEmulation = false;
- bool guestUsesAngle = false;
+ bool guestVulkanOnly = false;
bool useDedicatedAllocations = false;
};
void initVkEmulationFeatures(std::unique_ptr<VkEmulationFeatures>);
diff --git a/host/vulkan/VkDecoderGlobalState.cpp b/host/vulkan/VkDecoderGlobalState.cpp
index 3022974..40c2918 100644
--- a/host/vulkan/VkDecoderGlobalState.cpp
+++ b/host/vulkan/VkDecoderGlobalState.cpp
@@ -372,7 +372,6 @@
.control_get_hw_funcs()
->getPhysAddrStartLocked();
}
- mGuestUsesAngle = m_emu->features.GuestUsesAngle.enabled;
}
~Impl() = default;
@@ -4396,8 +4395,6 @@
void* mappedPtr = nullptr;
ManagedDescriptor externalMemoryHandle;
if (importCbInfoPtr) {
- bool vulkanOnly = mGuestUsesAngle;
-
bool colorBufferMemoryUsesDedicatedAlloc = false;
if (!getColorBufferAllocationInfo(importCbInfoPtr->colorBuffer,
&localAllocInfo.allocationSize,
@@ -4410,7 +4407,7 @@
shouldUseDedicatedAllocInfo &= colorBufferMemoryUsesDedicatedAlloc;
- if (!vulkanOnly) {
+ if (!m_emu->features.GuestVulkanOnly.enabled) {
auto fb = FrameBuffer::getFB();
if (fb) {
fb->invalidateColorBufferForVk(importCbInfoPtr->colorBuffer);
@@ -5375,8 +5372,7 @@
std::unordered_set<HandleType> acquiredColorBuffers;
std::unordered_set<HandleType> releasedColorBuffers;
- bool vulkanOnly = mGuestUsesAngle;
- if (!vulkanOnly) {
+ if (!m_emu->features.GuestVulkanOnly.enabled) {
{
std::lock_guard<std::recursive_mutex> lock(mLock);
for (int i = 0; i < submitCount; i++) {
@@ -7780,7 +7776,6 @@
bool mLogging = false;
bool mVerbosePrints = false;
bool mUseOldMemoryCleanupPath = false;
- bool mGuestUsesAngle = false;
std::recursive_mutex mLock;