Use context-specific ring for QSRI export task
... as the guest assumes fence context support in
exportSyncFdForQSRILocked(). Without this, the task and fence end
up on separate rings and the fence may be signaled before the task
is completed. This can lead to unbox_VkImage() fatal failures if
the guest thinks the image is no longer in use (due to fences
being signaled early) and destroys images.
Bug: b/238778162
Test: dEQP-EGL.*
Change-Id: I7c0ec144215f4ec212b8603277114fa1c4f11b6c
diff --git a/stream-servers/virtio-gpu-gfxstream-renderer.cpp b/stream-servers/virtio-gpu-gfxstream-renderer.cpp
index d023cb0..35de5bb 100644
--- a/stream-servers/virtio-gpu-gfxstream-renderer.cpp
+++ b/stream-servers/virtio-gpu-gfxstream-renderer.cpp
@@ -808,7 +808,7 @@
int submitCmd(VirtioGpuCtxId ctxId, void* buffer, int dwordCount) {
// TODO(kaiyili): embed the ring_idx into the command buffer to make it possible to dispatch
// commands on different ring.
- const VirtioGpuRing ring = VirtioGpuRingGlobal{};
+ VirtioGpuRing ring = VirtioGpuRingGlobal{};
VGPLOG("ctx: %" PRIu32 ", ring: %s buffer: %p dwords: %d", ctxId, to_string(ring).c_str(),
buffer, dwordCount);
@@ -864,6 +864,15 @@
break;
}
case kVirtioGpuNativeSyncVulkanQsriExport: {
+ // The guest QSRI export assumes fence context support and always uses
+ // VIRTGPU_EXECBUF_RING_IDX. With this, the task created here must use
+ // the same ring as the fence created for the virtio gpu command or the
+ // fence may be signaled without properly waiting for the task to complete.
+ ring = VirtioGpuRingContextSpecific{
+ .mCtxId = ctxId,
+ .mRingIdx = 0,
+ };
+
uint64_t image_handle_lo = dwords[1];
uint64_t image_handle_hi = dwords[2];
uint64_t image_handle = convert32to64(image_handle_lo, image_handle_hi);