blob: 055f566750816f95bc979d3b203390620dd2dbe7 [file] [log] [blame]
Lingfeng Yanga963ea02019-03-21 21:27:04 -07001// Copyright (C) 2018 The Android Open Source Project
Lingfeng Yang71b596b2018-11-07 18:03:25 -08002// Copyright (C) 2018 Google Inc.
3//
4// Licensed under the Apache License, Version 2.0 (the "License");
5// you may not use this file except in compliance with the License.
6// You may obtain a copy of the License at
7//
8// http://www.apache.org/licenses/LICENSE-2.0
9//
10// Unless required by applicable law or agreed to in writing, software
11// distributed under the License is distributed on an "AS IS" BASIS,
12// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13// See the License for the specific language governing permissions and
14// limitations under the License.
15
Lingfeng Yang71b596b2018-11-07 18:03:25 -080016#include "ResourceTracker.h"
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -070017
Lingfeng Yang31754632018-12-21 18:24:55 -080018#include "../OpenglSystemCommon/EmulatorFeatureInfo.h"
Lingfeng Yang34b5cae2019-08-21 14:12:19 -070019#include "../OpenglSystemCommon/HostConnection.h"
Shalini Sdb704c92023-01-27 21:35:33 +000020#include "CommandBufferStagingStream.h"
21#include "DescriptorSetVirtualization.h"
Lingfeng Yang58b89c82018-12-25 11:23:21 -080022#include "HostVisibleMemoryVirtualization.h"
Lingfeng Yang71b596b2018-11-07 18:03:25 -080023#include "Resources.h"
Lingfeng Yang131d5a42018-11-30 12:00:33 -080024#include "VkEncoder.h"
Joshua Duongcda9cf12022-10-17 21:41:48 -070025#include "aemu/base/AlignedBuf.h"
Gurchetan Singhc4444b82023-09-19 08:06:20 -070026#include "gfxstream_vk_private.h"
Lingfeng Yang236abc92018-12-21 20:19:33 -080027#include "goldfish_address_space.h"
Lingfeng Yangdef88ba2018-12-13 12:43:17 -080028#include "goldfish_vk_private_defs.h"
Gurchetan Singhfeb8fb12023-05-08 16:36:19 -070029#include "util.h"
30#include "virtgpu_gfxstream_protocol.h"
Sergiuad918472024-05-21 16:28:45 +010031#include "vulkan/vk_enum_string_helper.h"
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -070032#include "vulkan/vulkan_core.h"
Kaiyi Li6a76b332022-08-23 08:10:59 -070033#ifdef VK_USE_PLATFORM_ANDROID_KHR
Lingfeng Yang5c701122019-03-05 08:34:46 -080034#include "vk_format_info.h"
Kaiyi Li6a76b332022-08-23 08:10:59 -070035#endif
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -070036#include <stdlib.h>
37#include <vndk/hardware_buffer.h>
Lingfeng Yangdef88ba2018-12-13 12:43:17 -080038
#include <algorithm>
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
Lingfeng Yang6ab1b0d2018-11-27 23:36:03 -080044
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -070045#include "vk_struct_id.h"
46#include "vk_util.h"
Lingfeng Yang131d5a42018-11-30 12:00:33 -080047
Yahan Zhouf72403b2022-02-08 18:50:35 -080048#if defined(__ANDROID__) || defined(__linux__) || defined(__APPLE__)
Lingfeng Yangf83538d2019-03-07 15:07:36 -080049
Yahan Zhoua499e442019-02-26 16:35:01 -080050#include <sys/mman.h>
51#include <sys/syscall.h>
52
Yahan Zhoua499e442019-02-26 16:35:01 -080053
// memfd_create(2) wrapper. Bionic does not expose memfd_create as a libc
// symbol on all supported Android API levels, so it is invoked via raw
// syscall(2). On non-Android hosts this returns -1 (failure); presumably
// callers fall back to another allocation path — confirm at call sites.
static inline int inline_memfd_create(const char* name, unsigned int flags) {
#if defined(__ANDROID__)
    return syscall(SYS_memfd_create, name, flags);
#else
    return -1;
#endif
}
Gurchetan Singha81c85e2022-02-07 18:47:12 -080061
Elliott Hughes4564a052019-08-05 18:55:53 -070062#define memfd_create inline_memfd_create
Gurchetan Singha81c85e2022-02-07 18:47:12 -080063#endif
Yahan Zhoua499e442019-02-26 16:35:01 -080064
#ifndef VK_USE_PLATFORM_FUCHSIA
// No-op stand-ins for the Zircon syscalls referenced by shared code paths,
// so non-Fuchsia builds still compile and link. They intentionally do nothing.
void zx_handle_close(zx_handle_t) {}
void zx_event_create(int, zx_handle_t*) {}
#endif
69
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -070070static constexpr uint32_t kDefaultApiVersion = VK_MAKE_VERSION(1, 1, 0);
Lingfeng Yang6ab1b0d2018-11-27 23:36:03 -080071
Jason Macnak3d664002023-03-30 16:00:50 -070072namespace gfxstream {
73namespace vk {
Lingfeng Yang71b596b2018-11-07 18:03:25 -080074
// Generates the three VulkanHandleMapping visitor overloads for one handle
// type: in-place handle mapping, handle -> uint64_t, and uint64_t -> handle.
// |map_impl|, |map_to_u64_impl| and |map_from_u64_impl| are statement
// fragments executed once per array element (index |i| is in scope).
#define MAKE_HANDLE_MAPPING_FOREACH(type_name, map_impl, map_to_u64_impl, map_from_u64_impl) \
    void mapHandles_##type_name(type_name* handles, size_t count) override {                 \
        for (size_t i = 0; i < count; ++i) {                                                 \
            map_impl;                                                                        \
        }                                                                                    \
    }                                                                                        \
    void mapHandles_##type_name##_u64(const type_name* handles, uint64_t* handle_u64s,       \
                                      size_t count) override {                               \
        for (size_t i = 0; i < count; ++i) {                                                 \
            map_to_u64_impl;                                                                 \
        }                                                                                    \
    }                                                                                        \
    void mapHandles_u64_##type_name(const uint64_t* handle_u64s, type_name* handles, size_t count) \
        override {                                                                           \
        for (size_t i = 0; i < count; ++i) {                                                 \
            map_from_u64_impl;                                                               \
        }                                                                                    \
    }
Lingfeng Yang2285df12018-11-17 16:25:11 -080093
// Declares a concrete VulkanHandleMapping subclass whose per-handle-type
// methods are produced by expanding |impl| for every Vulkan handle type.
#define DEFINE_RESOURCE_TRACKING_CLASS(class_name, impl) \
    class class_name : public VulkanHandleMapping {      \
       public:                                           \
        virtual ~class_name() {}                         \
        GOLDFISH_VK_LIST_HANDLE_TYPES(impl)              \
    };
Lingfeng Yang2285df12018-11-17 16:25:11 -0800100
// Creation-time mapping: wraps each host handle in a new goldfish shadow
// object and registers it with the ResourceTracker. The u64 variants convert
// between handle and uint64_t representations along the way.
#define CREATE_MAPPING_IMPL_FOR_TYPE(type_name)                                \
    MAKE_HANDLE_MAPPING_FOREACH(                                               \
        type_name, handles[i] = new_from_host_##type_name(handles[i]);         \
        ResourceTracker::get()->register_##type_name(handles[i]);              \
        , handle_u64s[i] = (uint64_t)new_from_host_##type_name(handles[i]),    \
        handles[i] = (type_name)new_from_host_u64_##type_name(handle_u64s[i]); \
        ResourceTracker::get()->register_##type_name(handles[i]);)
Lingfeng Yang2285df12018-11-17 16:25:11 -0800108
// Unwrap mapping: translates guest (goldfish) handles back to the raw host
// handles without touching registration state.
#define UNWRAP_MAPPING_IMPL_FOR_TYPE(type_name)                                  \
    MAKE_HANDLE_MAPPING_FOREACH(                                                 \
        type_name, handles[i] = get_host_##type_name(handles[i]),                \
        handle_u64s[i] = (uint64_t)get_host_u64_##type_name(handles[i]),         \
        handles[i] = (type_name)get_host_##type_name((type_name)handle_u64s[i]))
Lingfeng Yang2285df12018-11-17 16:25:11 -0800114
// Destruction-time mapping: unregisters each handle from the ResourceTracker
// and frees its goldfish shadow object.
#define DESTROY_MAPPING_IMPL_FOR_TYPE(type_name)                                               \
    MAKE_HANDLE_MAPPING_FOREACH(type_name,                                                     \
                                ResourceTracker::get()->unregister_##type_name(handles[i]);    \
                                delete_goldfish_##type_name(handles[i]), (void)handle_u64s[i]; \
                                delete_goldfish_##type_name(handles[i]), (void)handles[i];     \
                                delete_goldfish_##type_name((type_name)handle_u64s[i]))
Lingfeng Yang2285df12018-11-17 16:25:11 -0800121
// Instantiate the create/destroy handle-mapping classes for all handle types.
DEFINE_RESOURCE_TRACKING_CLASS(CreateMapping, CREATE_MAPPING_IMPL_FOR_TYPE)
DEFINE_RESOURCE_TRACKING_CLASS(DestroyMapping, DESTROY_MAPPING_IMPL_FOR_TYPE)
Lingfeng Yang71b596b2018-11-07 18:03:25 -0800124
Lingfeng Yangdb616552021-01-22 17:58:02 -0800125static uint32_t* sSeqnoPtr = nullptr;
126
// static member definitions
uint32_t ResourceTracker::streamFeatureBits = 0;
ResourceTracker::ThreadingCallbacks ResourceTracker::threadingCallbacks;
130
Lingfeng Yang967f9af2021-01-22 17:56:24 -0800131struct StagingInfo {
132 Lock mLock;
133 std::vector<CommandBufferStagingStream*> streams;
134 std::vector<VkEncoder*> encoders;
Shalini Sdb704c92023-01-27 21:35:33 +0000135 /// \brief sets alloc and free callbacks for memory allocation for CommandBufferStagingStream(s)
136 /// \param allocFn is the callback to allocate memory
137 /// \param freeFn is the callback to free memory
138 void setAllocFree(CommandBufferStagingStream::Alloc&& allocFn,
139 CommandBufferStagingStream::Free&& freeFn) {
140 mAlloc = allocFn;
141 mFree = freeFn;
142 }
Lingfeng Yang967f9af2021-01-22 17:56:24 -0800143
Yilong Li27957ca2021-01-27 10:53:04 -0800144 ~StagingInfo() {
145 for (auto stream : streams) {
146 delete stream;
147 }
148
149 for (auto encoder : encoders) {
150 delete encoder;
151 }
152 }
153
Lingfeng Yang967f9af2021-01-22 17:56:24 -0800154 void pushStaging(CommandBufferStagingStream* stream, VkEncoder* encoder) {
Gurchetan Singh6c906de2021-10-21 17:09:00 -0700155 AutoLock<Lock> lock(mLock);
Lingfeng Yang967f9af2021-01-22 17:56:24 -0800156 stream->reset();
157 streams.push_back(stream);
158 encoders.push_back(encoder);
159 }
160
161 void popStaging(CommandBufferStagingStream** streamOut, VkEncoder** encoderOut) {
Gurchetan Singh6c906de2021-10-21 17:09:00 -0700162 AutoLock<Lock> lock(mLock);
Lingfeng Yang967f9af2021-01-22 17:56:24 -0800163 CommandBufferStagingStream* stream;
164 VkEncoder* encoder;
165 if (streams.empty()) {
Shalini Sdb704c92023-01-27 21:35:33 +0000166 if (mAlloc && mFree) {
167 // if custom allocators are provided, forward them to CommandBufferStagingStream
168 stream = new CommandBufferStagingStream(mAlloc, mFree);
169 } else {
170 stream = new CommandBufferStagingStream;
171 }
Lingfeng Yang967f9af2021-01-22 17:56:24 -0800172 encoder = new VkEncoder(stream);
173 } else {
174 stream = streams.back();
175 encoder = encoders.back();
176 streams.pop_back();
177 encoders.pop_back();
178 }
179 *streamOut = stream;
180 *encoderOut = encoder;
181 }
Shalini Sdb704c92023-01-27 21:35:33 +0000182
183 private:
184 CommandBufferStagingStream::Alloc mAlloc = nullptr;
185 CommandBufferStagingStream::Free mFree = nullptr;
Lingfeng Yang967f9af2021-01-22 17:56:24 -0800186};
187
188static StagingInfo sStaging;
189
// Descriptor sets referenced by a command buffer that are still pending;
// exact commit semantics are handled by code later in this file.
struct CommandBufferPendingDescriptorSets {
    std::unordered_set<VkDescriptorSet> sets;
};
193
// Defines ResourceTracker::register_<type>: creates a fresh <type>_Info entry
// for the handle, under the tracker's recursive lock.
#define HANDLE_REGISTER_IMPL_IMPL(type)               \
    void ResourceTracker::register_##type(type obj) { \
        AutoLock<RecursiveLock> lock(mLock);          \
        info_##type[obj] = type##_Info();             \
    }

// Defines ResourceTracker::unregister_<type>: erases the handle's info entry,
// under the tracker's recursive lock.
#define HANDLE_UNREGISTER_IMPL_IMPL(type)               \
    void ResourceTracker::unregister_##type(type obj) { \
        AutoLock<RecursiveLock> lock(mLock);            \
        info_##type.erase(obj);                         \
    }
205
// Emit register_* for every handle type; unregister_* only for the "trivial"
// handle types (the remaining types get their own unregister_* elsewhere).
GOLDFISH_VK_LIST_HANDLE_TYPES(HANDLE_REGISTER_IMPL_IMPL)
GOLDFISH_VK_LIST_TRIVIAL_HANDLE_TYPES(HANDLE_UNREGISTER_IMPL_IMPL)
// Uniform accessors over VkSubmitInfo (vkQueueSubmit) and VkSubmitInfo2
// (vkQueueSubmit2) so submit-processing code can be written once, templated
// on the submit-info type.

uint32_t getWaitSemaphoreCount(const VkSubmitInfo& pSubmit) { return pSubmit.waitSemaphoreCount; }

uint32_t getWaitSemaphoreCount(const VkSubmitInfo2& pSubmit) {
    return pSubmit.waitSemaphoreInfoCount;
}

uint32_t getCommandBufferCount(const VkSubmitInfo& pSubmit) { return pSubmit.commandBufferCount; }

uint32_t getCommandBufferCount(const VkSubmitInfo2& pSubmit) {
    return pSubmit.commandBufferInfoCount;
}

uint32_t getSignalSemaphoreCount(const VkSubmitInfo& pSubmit) {
    return pSubmit.signalSemaphoreCount;
}

uint32_t getSignalSemaphoreCount(const VkSubmitInfo2& pSubmit) {
    return pSubmit.signalSemaphoreInfoCount;
}

VkSemaphore getWaitSemaphore(const VkSubmitInfo& pSubmit, int i) {
    return pSubmit.pWaitSemaphores[i];
}

VkSemaphore getWaitSemaphore(const VkSubmitInfo2& pSubmit, int i) {
    return pSubmit.pWaitSemaphoreInfos[i].semaphore;
}

VkSemaphore getSignalSemaphore(const VkSubmitInfo& pSubmit, int i) {
    return pSubmit.pSignalSemaphores[i];
}

VkSemaphore getSignalSemaphore(const VkSubmitInfo2& pSubmit, int i) {
    return pSubmit.pSignalSemaphoreInfos[i].semaphore;
}

VkCommandBuffer getCommandBuffer(const VkSubmitInfo& pSubmit, int i) {
    return pSubmit.pCommandBuffers[i];
}

VkCommandBuffer getCommandBuffer(const VkSubmitInfo2& pSubmit, int i) {
    return pSubmit.pCommandBufferInfos[i].commandBuffer;
}
Lingfeng Yang236abc92018-12-21 20:19:33 -0800251
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700252bool descriptorPoolSupportsIndividualFreeLocked(VkDescriptorPool pool) {
253 return as_goldfish_VkDescriptorPool(pool)->allocInfo->createFlags &
254 VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
255}
Lingfeng Yangdef88ba2018-12-13 12:43:17 -0800256
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700257VkDescriptorImageInfo createImmutableSamplersFilteredImageInfo(
258 VkDescriptorType descType, VkDescriptorSet descSet, uint32_t binding,
259 const VkDescriptorImageInfo* pImageInfo) {
260 VkDescriptorImageInfo res = *pImageInfo;
Lingfeng Yang236abc92018-12-21 20:19:33 -0800261
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700262 if (descType != VK_DESCRIPTOR_TYPE_SAMPLER &&
263 descType != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
Lingfeng Yanga4ae0522021-02-17 14:12:19 -0800264 return res;
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700265
266 bool immutableSampler =
267 as_goldfish_VkDescriptorSet(descSet)->reified->bindingIsImmutableSampler[binding];
268
269 if (!immutableSampler) return res;
270
271 res.sampler = 0;
272
273 return res;
274}
275
276bool descriptorBindingIsImmutableSampler(VkDescriptorSet dstSet, uint32_t dstBinding) {
277 return as_goldfish_VkDescriptorSet(dstSet)->reified->bindingIsImmutableSampler[dstBinding];
278}
279
Gurchetan Singhc4444b82023-09-19 08:06:20 -0700280VkDescriptorImageInfo ResourceTracker::filterNonexistentSampler(
281 const VkDescriptorImageInfo& inputInfo) {
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700282 VkSampler sampler = inputInfo.sampler;
283
284 VkDescriptorImageInfo res = inputInfo;
285
286 if (sampler) {
287 auto it = info_VkSampler.find(sampler);
288 bool samplerExists = it != info_VkSampler.end();
289 if (!samplerExists) res.sampler = 0;
Lingfeng Yange9e77d52020-03-25 14:01:58 -0700290 }
291
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700292 return res;
293}
Lingfeng Yang03354c72020-03-26 13:00:51 -0700294
Gurchetan Singhc4444b82023-09-19 08:06:20 -0700295void ResourceTracker::emitDeviceMemoryReport(VkDevice_Info info,
296 VkDeviceMemoryReportEventTypeEXT type,
297 uint64_t memoryObjectId, VkDeviceSize size,
298 VkObjectType objectType, uint64_t objectHandle,
299 uint32_t heapIndex) {
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700300 if (info.deviceMemoryReportCallbacks.empty()) return;
301
302 const VkDeviceMemoryReportCallbackDataEXT callbackData = {
303 VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT, // sType
304 nullptr, // pNext
305 0, // flags
306 type, // type
307 memoryObjectId, // memoryObjectId
308 size, // size
309 objectType, // objectType
310 objectHandle, // objectHandle
311 heapIndex, // heapIndex
312 };
313 for (const auto& callback : info.deviceMemoryReportCallbacks) {
314 callback.first(&callbackData, callback.second);
Lingfeng Yangffb94af2021-04-22 15:16:35 -0700315 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700316}
David Revemane10aae22019-03-17 15:55:45 -0400317
318#ifdef VK_USE_PLATFORM_FUCHSIA
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700319inline fuchsia_sysmem::wire::BufferCollectionConstraints defaultBufferCollectionConstraints(
320 size_t minSizeBytes, size_t minBufferCount, size_t maxBufferCount = 0u,
321 size_t minBufferCountForCamping = 0u, size_t minBufferCountForDedicatedSlack = 0u,
322 size_t minBufferCountForSharedSlack = 0u) {
323 fuchsia_sysmem::wire::BufferCollectionConstraints constraints = {};
324 constraints.min_buffer_count = minBufferCount;
325 if (maxBufferCount > 0) {
326 constraints.max_buffer_count = maxBufferCount;
Lingfeng Yang236abc92018-12-21 20:19:33 -0800327 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700328 if (minBufferCountForCamping) {
329 constraints.min_buffer_count_for_camping = minBufferCountForCamping;
Lingfeng Yang34b5cae2019-08-21 14:12:19 -0700330 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700331 if (minBufferCountForSharedSlack) {
332 constraints.min_buffer_count_for_shared_slack = minBufferCountForSharedSlack;
Lingfeng Yangb8a38c72019-02-02 20:27:54 -0800333 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700334 constraints.has_buffer_memory_constraints = true;
335 fuchsia_sysmem::wire::BufferMemoryConstraints& buffer_constraints =
336 constraints.buffer_memory_constraints;
Lingfeng Yangb8a38c72019-02-02 20:27:54 -0800337
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700338 buffer_constraints.min_size_bytes = minSizeBytes;
339 buffer_constraints.max_size_bytes = 0xffffffff;
340 buffer_constraints.physically_contiguous_required = false;
341 buffer_constraints.secure_required = false;
Lingfeng Yang31754632018-12-21 18:24:55 -0800342
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700343 // No restrictions on coherency domain or Heaps.
344 buffer_constraints.ram_domain_supported = true;
345 buffer_constraints.cpu_domain_supported = true;
346 buffer_constraints.inaccessible_domain_supported = true;
347 buffer_constraints.heap_permitted_count = 2;
348 buffer_constraints.heap_permitted[0] = fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
349 buffer_constraints.heap_permitted[1] = fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
Lingfeng Yangb55ed1c2019-06-20 15:57:08 -0700350
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700351 return constraints;
352}
Lingfeng Yangc53e7472019-03-27 08:50:55 -0700353
// Translates the VkImageUsageFlags from |pImageInfo| into the corresponding
// fuchsia.sysmem Vulkan image-usage bits.
uint32_t getBufferCollectionConstraintsVulkanImageUsage(const VkImageCreateInfo* pImageInfo) {
    uint32_t usage = 0u;
    VkImageUsageFlags imageUsage = pImageInfo->usage;

// Maps VK_IMAGE_USAGE_<BIT>_BIT -> kVulkanImageUsage<VALUE>.
#define SetUsageBit(BIT, VALUE)                                  \
    if (imageUsage & VK_IMAGE_USAGE_##BIT##_BIT) {               \
        usage |= fuchsia_sysmem::wire::kVulkanImageUsage##VALUE; \
    }

    SetUsageBit(COLOR_ATTACHMENT, ColorAttachment);
    SetUsageBit(TRANSFER_SRC, TransferSrc);
    SetUsageBit(TRANSFER_DST, TransferDst);
    SetUsageBit(SAMPLED, Sampled);

#undef SetUsageBit
    return usage;
}
Yilong Li2f315502020-07-10 17:51:28 -0700371
// Translates VkBufferUsageFlags into the corresponding fuchsia.sysmem Vulkan
// buffer-usage bits.
uint32_t getBufferCollectionConstraintsVulkanBufferUsage(VkBufferUsageFlags bufferUsage) {
    uint32_t usage = 0u;

// Maps VK_BUFFER_USAGE_<BIT>_BIT -> kVulkanBufferUsage<VALUE>.
#define SetUsageBit(BIT, VALUE)                                   \
    if (bufferUsage & VK_BUFFER_USAGE_##BIT##_BIT) {              \
        usage |= fuchsia_sysmem::wire::kVulkanBufferUsage##VALUE; \
    }

    SetUsageBit(TRANSFER_SRC, TransferSrc);
    SetUsageBit(TRANSFER_DST, TransferDst);
    SetUsageBit(UNIFORM_TEXEL_BUFFER, UniformTexelBuffer);
    SetUsageBit(STORAGE_TEXEL_BUFFER, StorageTexelBuffer);
    SetUsageBit(UNIFORM_BUFFER, UniformBuffer);
    SetUsageBit(STORAGE_BUFFER, StorageBuffer);
    SetUsageBit(INDEX_BUFFER, IndexBuffer);
    SetUsageBit(VERTEX_BUFFER, VertexBuffer);
    SetUsageBit(INDIRECT_BUFFER, IndirectBuffer);

#undef SetUsageBit
    return usage;
}
393
// Convenience overload: extracts the VkBufferUsageFlags from the buffer
// constraints info and translates them to sysmem usage bits.
uint32_t getBufferCollectionConstraintsVulkanBufferUsage(
    const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
    VkBufferUsageFlags bufferUsage = pBufferConstraintsInfo->createInfo.usage;
    return getBufferCollectionConstraintsVulkanBufferUsage(bufferUsage);
}
399
// Maps a VkFormat to the sysmem pixel format used to describe it; returns
// kInvalid for formats with no sysmem equivalent. All channel interpretations
// (UNORM/SINT/SRGB/...) of a given bit layout map to the same sysmem type.
static fuchsia_sysmem::wire::PixelFormatType vkFormatTypeToSysmem(VkFormat format) {
    switch (format) {
        case VK_FORMAT_B8G8R8A8_SINT:
        case VK_FORMAT_B8G8R8A8_UNORM:
        case VK_FORMAT_B8G8R8A8_SRGB:
        case VK_FORMAT_B8G8R8A8_SNORM:
        case VK_FORMAT_B8G8R8A8_SSCALED:
        case VK_FORMAT_B8G8R8A8_USCALED:
            return fuchsia_sysmem::wire::PixelFormatType::kBgra32;
        case VK_FORMAT_R8G8B8A8_SINT:
        case VK_FORMAT_R8G8B8A8_UNORM:
        case VK_FORMAT_R8G8B8A8_SRGB:
        case VK_FORMAT_R8G8B8A8_SNORM:
        case VK_FORMAT_R8G8B8A8_SSCALED:
        case VK_FORMAT_R8G8B8A8_USCALED:
            return fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8;
        case VK_FORMAT_R8_UNORM:
        case VK_FORMAT_R8_UINT:
        case VK_FORMAT_R8_USCALED:
        case VK_FORMAT_R8_SNORM:
        case VK_FORMAT_R8_SINT:
        case VK_FORMAT_R8_SSCALED:
        case VK_FORMAT_R8_SRGB:
            return fuchsia_sysmem::wire::PixelFormatType::kR8;
        case VK_FORMAT_R8G8_UNORM:
        case VK_FORMAT_R8G8_UINT:
        case VK_FORMAT_R8G8_USCALED:
        case VK_FORMAT_R8G8_SNORM:
        case VK_FORMAT_R8G8_SINT:
        case VK_FORMAT_R8G8_SSCALED:
        case VK_FORMAT_R8G8_SRGB:
            return fuchsia_sysmem::wire::PixelFormatType::kR8G8;
        default:
            return fuchsia_sysmem::wire::PixelFormatType::kInvalid;
    }
}
436
// Whether |vkFormat| is compatible with |sysmemFormat|. Mirrors
// vkFormatTypeToSysmem, except that single-channel R8 formats additionally
// accept the sysmem L8 (luminance) format.
static bool vkFormatMatchesSysmemFormat(VkFormat vkFormat,
                                        fuchsia_sysmem::wire::PixelFormatType sysmemFormat) {
    switch (vkFormat) {
        case VK_FORMAT_B8G8R8A8_SINT:
        case VK_FORMAT_B8G8R8A8_UNORM:
        case VK_FORMAT_B8G8R8A8_SRGB:
        case VK_FORMAT_B8G8R8A8_SNORM:
        case VK_FORMAT_B8G8R8A8_SSCALED:
        case VK_FORMAT_B8G8R8A8_USCALED:
            return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kBgra32;
        case VK_FORMAT_R8G8B8A8_SINT:
        case VK_FORMAT_R8G8B8A8_UNORM:
        case VK_FORMAT_R8G8B8A8_SRGB:
        case VK_FORMAT_R8G8B8A8_SNORM:
        case VK_FORMAT_R8G8B8A8_SSCALED:
        case VK_FORMAT_R8G8B8A8_USCALED:
            return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8;
        case VK_FORMAT_R8_UNORM:
        case VK_FORMAT_R8_UINT:
        case VK_FORMAT_R8_USCALED:
        case VK_FORMAT_R8_SNORM:
        case VK_FORMAT_R8_SINT:
        case VK_FORMAT_R8_SSCALED:
        case VK_FORMAT_R8_SRGB:
            return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kR8 ||
                   sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kL8;
        case VK_FORMAT_R8G8_UNORM:
        case VK_FORMAT_R8G8_UINT:
        case VK_FORMAT_R8G8_USCALED:
        case VK_FORMAT_R8G8_SNORM:
        case VK_FORMAT_R8G8_SINT:
        case VK_FORMAT_R8G8_SSCALED:
        case VK_FORMAT_R8G8_SRGB:
            return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kR8G8;
        default:
            return false;
    }
}
475
// Maps a sysmem pixel format back to a canonical VkFormat (SRGB for the
// 4-channel formats, UNORM for the 1/2-channel ones); VK_FORMAT_UNDEFINED
// when there is no mapping.
static VkFormat sysmemPixelFormatTypeToVk(fuchsia_sysmem::wire::PixelFormatType format) {
    switch (format) {
        case fuchsia_sysmem::wire::PixelFormatType::kBgra32:
            return VK_FORMAT_B8G8R8A8_SRGB;
        case fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8:
            return VK_FORMAT_R8G8B8A8_SRGB;
        case fuchsia_sysmem::wire::PixelFormatType::kL8:
        case fuchsia_sysmem::wire::PixelFormatType::kR8:
            return VK_FORMAT_R8_UNORM;
        case fuchsia_sysmem::wire::PixelFormatType::kR8G8:
            return VK_FORMAT_R8G8_UNORM;
        default:
            return VK_FORMAT_UNDEFINED;
    }
}
491
// TODO(fxbug.dev/42172354): This is currently only used for allocating
// memory for dedicated external images. It should be migrated to use
// SetBufferCollectionImageConstraintsFUCHSIA.
//
// Builds VkImageConstraintsInfoFUCHSIA from |pImageInfo| (expanding
// VK_FORMAT_UNDEFINED into the B8G8R8A8/R8G8B8A8 SRGB candidates) and
// forwards it to setBufferCollectionImageConstraintsFUCHSIA.
VkResult ResourceTracker::setBufferCollectionConstraintsFUCHSIA(
    VkEncoder* enc, VkDevice device,
    fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* collection,
    const VkImageCreateInfo* pImageInfo) {
    if (pImageInfo == nullptr) {
        mesa_loge("setBufferCollectionConstraints: pImageInfo cannot be null.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    // Every format candidate is paired with the sRGB color space.
    const VkSysmemColorSpaceFUCHSIA kDefaultColorSpace = {
        .sType = VK_STRUCTURE_TYPE_SYSMEM_COLOR_SPACE_FUCHSIA,
        .pNext = nullptr,
        .colorSpace = static_cast<uint32_t>(fuchsia_sysmem::wire::ColorSpaceType::kSrgb),
    };

    std::vector<VkImageFormatConstraintsInfoFUCHSIA> formatInfos;
    if (pImageInfo->format == VK_FORMAT_UNDEFINED) {
        // Undefined format: offer both 32-bit SRGB candidates to sysmem.
        const auto kFormats = {
            VK_FORMAT_B8G8R8A8_SRGB,
            VK_FORMAT_R8G8B8A8_SRGB,
        };
        for (auto format : kFormats) {
            // shallow copy, using pNext from pImageInfo directly.
            auto createInfo = *pImageInfo;
            createInfo.format = format;
            formatInfos.push_back(VkImageFormatConstraintsInfoFUCHSIA{
                .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA,
                .pNext = nullptr,
                .imageCreateInfo = createInfo,
                .colorSpaceCount = 1,
                .pColorSpaces = &kDefaultColorSpace,
            });
        }
    } else {
        formatInfos.push_back(VkImageFormatConstraintsInfoFUCHSIA{
            .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA,
            .pNext = nullptr,
            .imageCreateInfo = *pImageInfo,
            .colorSpaceCount = 1,
            .pColorSpaces = &kDefaultColorSpace,
        });
    }

    VkImageConstraintsInfoFUCHSIA imageConstraints = {
        .sType = VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA,
        .pNext = nullptr,
        .formatConstraintsCount = static_cast<uint32_t>(formatInfos.size()),
        .pFormatConstraints = formatInfos.data(),
        .bufferCollectionConstraints =
            VkBufferCollectionConstraintsInfoFUCHSIA{
                .sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CONSTRAINTS_INFO_FUCHSIA,
                .pNext = nullptr,
                .minBufferCount = 1,
                .maxBufferCount = 0,
                .minBufferCountForCamping = 0,
                .minBufferCountForDedicatedSlack = 0,
                .minBufferCountForSharedSlack = 0,
            },
        .flags = 0u,
    };

    return setBufferCollectionImageConstraintsFUCHSIA(enc, device, collection, &imageConstraints);
}
558
559VkResult addImageBufferCollectionConstraintsFUCHSIA(
560 VkEncoder* enc, VkDevice device, VkPhysicalDevice physicalDevice,
561 const VkImageFormatConstraintsInfoFUCHSIA* formatConstraints, // always non-zero
562 VkImageTiling tiling, fuchsia_sysmem::wire::BufferCollectionConstraints* constraints) {
563 // First check if the format, tiling and usage is supported on host.
564 VkImageFormatProperties imageFormatProperties;
565 auto createInfo = &formatConstraints->imageCreateInfo;
566 auto result = enc->vkGetPhysicalDeviceImageFormatProperties(
567 physicalDevice, createInfo->format, createInfo->imageType, tiling, createInfo->usage,
568 createInfo->flags, &imageFormatProperties, true /* do lock */);
569 if (result != VK_SUCCESS) {
Gurchetan Singh42361f72024-05-16 17:37:11 -0700570 mesa_logd(
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700571 "%s: Image format (%u) type (%u) tiling (%u) "
572 "usage (%u) flags (%u) not supported by physical "
573 "device",
574 __func__, static_cast<uint32_t>(createInfo->format),
575 static_cast<uint32_t>(createInfo->imageType), static_cast<uint32_t>(tiling),
576 static_cast<uint32_t>(createInfo->usage), static_cast<uint32_t>(createInfo->flags));
577 return VK_ERROR_FORMAT_NOT_SUPPORTED;
578 }
579
580 // Check if format constraints contains unsupported format features.
581 {
582 VkFormatProperties formatProperties;
583 enc->vkGetPhysicalDeviceFormatProperties(physicalDevice, createInfo->format,
584 &formatProperties, true /* do lock */);
585
586 auto supportedFeatures = (tiling == VK_IMAGE_TILING_LINEAR)
587 ? formatProperties.linearTilingFeatures
588 : formatProperties.optimalTilingFeatures;
589 auto requiredFeatures = formatConstraints->requiredFormatFeatures;
590 if ((~supportedFeatures) & requiredFeatures) {
Gurchetan Singh42361f72024-05-16 17:37:11 -0700591 mesa_logd(
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700592 "%s: Host device support features for %s tiling: %08x, "
593 "required features: %08x, feature bits %08x missing",
594 __func__, tiling == VK_IMAGE_TILING_LINEAR ? "LINEAR" : "OPTIMAL",
595 static_cast<uint32_t>(requiredFeatures), static_cast<uint32_t>(supportedFeatures),
596 static_cast<uint32_t>((~supportedFeatures) & requiredFeatures));
Yilong Li4d0ee602021-01-28 01:56:33 -0800597 return VK_ERROR_FORMAT_NOT_SUPPORTED;
598 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700599 }
Yilong Li4d0ee602021-01-28 01:56:33 -0800600
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700601 fuchsia_sysmem::wire::ImageFormatConstraints imageConstraints;
602 if (formatConstraints->sysmemPixelFormat != 0) {
603 auto pixelFormat = static_cast<fuchsia_sysmem::wire::PixelFormatType>(
604 formatConstraints->sysmemPixelFormat);
605 if (createInfo->format != VK_FORMAT_UNDEFINED &&
606 !vkFormatMatchesSysmemFormat(createInfo->format, pixelFormat)) {
Gurchetan Singh42361f72024-05-16 17:37:11 -0700607 mesa_logd("%s: VkFormat %u doesn't match sysmem pixelFormat %lu", __func__,
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700608 static_cast<uint32_t>(createInfo->format), formatConstraints->sysmemPixelFormat);
609 return VK_ERROR_FORMAT_NOT_SUPPORTED;
610 }
611 imageConstraints.pixel_format.type = pixelFormat;
612 } else {
613 auto pixel_format = vkFormatTypeToSysmem(createInfo->format);
614 if (pixel_format == fuchsia_sysmem::wire::PixelFormatType::kInvalid) {
Gurchetan Singh42361f72024-05-16 17:37:11 -0700615 mesa_logd("%s: Unsupported VkFormat %u", __func__,
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700616 static_cast<uint32_t>(createInfo->format));
617 return VK_ERROR_FORMAT_NOT_SUPPORTED;
618 }
619 imageConstraints.pixel_format.type = pixel_format;
620 }
Yilong Li4d0ee602021-01-28 01:56:33 -0800621
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700622 imageConstraints.color_spaces_count = formatConstraints->colorSpaceCount;
623 for (size_t i = 0; i < formatConstraints->colorSpaceCount; i++) {
624 imageConstraints.color_space[0].type = static_cast<fuchsia_sysmem::wire::ColorSpaceType>(
625 formatConstraints->pColorSpaces[i].colorSpace);
626 }
627
628 // Get row alignment from host GPU.
629 VkDeviceSize offset = 0;
630 VkDeviceSize rowPitchAlignment = 1u;
631
632 if (tiling == VK_IMAGE_TILING_LINEAR) {
633 VkImageCreateInfo createInfoDup = *createInfo;
634 createInfoDup.pNext = nullptr;
635 enc->vkGetLinearImageLayout2GOOGLE(device, &createInfoDup, &offset, &rowPitchAlignment,
636 true /* do lock */);
Gurchetan Singh42361f72024-05-16 17:37:11 -0700637 mesa_logd(
Gurchetan Singhc4444b82023-09-19 08:06:20 -0700638 "vkGetLinearImageLayout2GOOGLE: format %d offset %lu "
639 "rowPitchAlignment = %lu",
640 (int)createInfo->format, offset, rowPitchAlignment);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700641 }
642
643 imageConstraints.min_coded_width = createInfo->extent.width;
644 imageConstraints.max_coded_width = 0xfffffff;
645 imageConstraints.min_coded_height = createInfo->extent.height;
646 imageConstraints.max_coded_height = 0xffffffff;
647 // The min_bytes_per_row can be calculated by sysmem using
648 // |min_coded_width|, |bytes_per_row_divisor| and color format.
649 imageConstraints.min_bytes_per_row = 0;
650 imageConstraints.max_bytes_per_row = 0xffffffff;
651 imageConstraints.max_coded_width_times_coded_height = 0xffffffff;
652
653 imageConstraints.layers = 1;
654 imageConstraints.coded_width_divisor = 1;
655 imageConstraints.coded_height_divisor = 1;
656 imageConstraints.bytes_per_row_divisor = rowPitchAlignment;
657 imageConstraints.start_offset_divisor = 1;
658 imageConstraints.display_width_divisor = 1;
659 imageConstraints.display_height_divisor = 1;
660 imageConstraints.pixel_format.has_format_modifier = true;
661 imageConstraints.pixel_format.format_modifier.value =
662 (tiling == VK_IMAGE_TILING_LINEAR)
663 ? fuchsia_sysmem::wire::kFormatModifierLinear
664 : fuchsia_sysmem::wire::kFormatModifierGoogleGoldfishOptimal;
665
666 constraints->image_format_constraints[constraints->image_format_constraints_count++] =
667 imageConstraints;
668 return VK_SUCCESS;
669}
670
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700671SetBufferCollectionBufferConstraintsResult setBufferCollectionBufferConstraintsImpl(
672 fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
673 const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
674 const auto& collection = *pCollection;
675 if (pBufferConstraintsInfo == nullptr) {
Gurchetan Singh42361f72024-05-16 17:37:11 -0700676 mesa_loge(
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700677 "setBufferCollectionBufferConstraints: "
678 "pBufferConstraintsInfo cannot be null.");
679 return {VK_ERROR_OUT_OF_DEVICE_MEMORY};
680 }
681
682 fuchsia_sysmem::wire::BufferCollectionConstraints constraints =
683 defaultBufferCollectionConstraints(
684 /* min_size_bytes */ pBufferConstraintsInfo->createInfo.size,
685 /* buffer_count */ pBufferConstraintsInfo->bufferCollectionConstraints.minBufferCount);
686 constraints.usage.vulkan =
687 getBufferCollectionConstraintsVulkanBufferUsage(pBufferConstraintsInfo);
688
689 constexpr uint32_t kVulkanPriority = 5;
690 const char kName[] = "GoldfishBufferSysmemShared";
691 collection->SetName(kVulkanPriority, fidl::StringView(kName));
692
693 auto result = collection->SetConstraints(true, constraints);
694 if (!result.ok()) {
Gurchetan Singh42361f72024-05-16 17:37:11 -0700695 mesa_loge("setBufferCollectionConstraints: SetConstraints failed: %d", result.status());
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700696 return {VK_ERROR_OUT_OF_DEVICE_MEMORY};
697 }
698
699 return {VK_SUCCESS, constraints};
700}
701#endif
702
703uint64_t getAHardwareBufferId(AHardwareBuffer* ahw) {
704 uint64_t id = 0;
Jason Macnakabe57a82024-02-02 17:02:21 -0800705#if defined(ANDROID)
706 auto* gralloc = ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper();
707 gralloc->getId(ahw, &id);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700708#else
709 (void)ahw;
710#endif
711 return id;
712}
713
714void transformExternalResourceMemoryDedicatedRequirementsForGuest(
715 VkMemoryDedicatedRequirements* dedicatedReqs) {
716 dedicatedReqs->prefersDedicatedAllocation = VK_TRUE;
717 dedicatedReqs->requiresDedicatedAllocation = VK_TRUE;
718}
719
Gurchetan Singhc4444b82023-09-19 08:06:20 -0700720void ResourceTracker::transformImageMemoryRequirementsForGuestLocked(VkImage image,
721 VkMemoryRequirements* reqs) {
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700722#ifdef VK_USE_PLATFORM_FUCHSIA
723 auto it = info_VkImage.find(image);
724 if (it == info_VkImage.end()) return;
725 auto& info = it->second;
726 if (info.isSysmemBackedMemory) {
727 auto width = info.createInfo.extent.width;
728 auto height = info.createInfo.extent.height;
Gurchetan Singhc4444b82023-09-19 08:06:20 -0700729 reqs->size = width * height * 4;
730 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700731#else
732 // Bypass "unused parameter" checks.
733 (void)image;
Gurchetan Singhc4444b82023-09-19 08:06:20 -0700734 (void)reqs;
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700735#endif
736}
737
Gurchetan Singhc4444b82023-09-19 08:06:20 -0700738CoherentMemoryPtr ResourceTracker::freeCoherentMemoryLocked(VkDeviceMemory memory,
739 VkDeviceMemory_Info& info) {
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700740 if (info.coherentMemory && info.ptr) {
741 if (info.coherentMemory->getDeviceMemory() != memory) {
742 delete_goldfish_VkDeviceMemory(memory);
Yilong Li4d0ee602021-01-28 01:56:33 -0800743 }
744
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700745 if (info.ptr) {
746 info.coherentMemory->release(info.ptr);
747 info.ptr = nullptr;
748 }
749
750 return std::move(info.coherentMemory);
751 }
752
753 return nullptr;
754}
755
756VkResult createFence(VkDevice device, uint64_t hostFenceHandle, int64_t& osHandle) {
757 struct VirtGpuExecBuffer exec = {};
758 struct gfxstreamCreateExportSyncVK exportSync = {};
759 VirtGpuDevice* instance = VirtGpuDevice::getInstance();
760
761 uint64_t hostDeviceHandle = get_host_u64_VkDevice(device);
762
763 exportSync.hdr.opCode = GFXSTREAM_CREATE_EXPORT_SYNC_VK;
764 exportSync.deviceHandleLo = (uint32_t)hostDeviceHandle;
765 exportSync.deviceHandleHi = (uint32_t)(hostDeviceHandle >> 32);
766 exportSync.fenceHandleLo = (uint32_t)hostFenceHandle;
767 exportSync.fenceHandleHi = (uint32_t)(hostFenceHandle >> 32);
768
769 exec.command = static_cast<void*>(&exportSync);
770 exec.command_size = sizeof(exportSync);
771 exec.flags = kFenceOut | kRingIdx;
772 if (instance->execBuffer(exec, nullptr)) return VK_ERROR_OUT_OF_HOST_MEMORY;
773
774 osHandle = exec.handle.osHandle;
775 return VK_SUCCESS;
776}
777
778void collectAllPendingDescriptorSetsBottomUp(const std::vector<VkCommandBuffer>& workingSet,
779 std::unordered_set<VkDescriptorSet>& allDs) {
780 if (workingSet.empty()) return;
781
782 std::vector<VkCommandBuffer> nextLevel;
783 for (auto commandBuffer : workingSet) {
784 struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
785 forAllObjects(cb->subObjects, [&nextLevel](void* secondary) {
786 nextLevel.push_back((VkCommandBuffer)secondary);
787 });
788 }
789
790 collectAllPendingDescriptorSetsBottomUp(nextLevel, allDs);
791
792 for (auto cmdbuf : workingSet) {
793 struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(cmdbuf);
794
795 if (!cb->userPtr) {
796 continue; // No descriptors to update.
797 }
798
799 CommandBufferPendingDescriptorSets* pendingDescriptorSets =
800 (CommandBufferPendingDescriptorSets*)(cb->userPtr);
801
802 if (pendingDescriptorSets->sets.empty()) {
803 continue; // No descriptors to update.
804 }
805
806 allDs.insert(pendingDescriptorSets->sets.begin(), pendingDescriptorSets->sets.end());
807 }
808}
809
// Flushes all pending allocations and writes for the descriptor sets in
// |sets| to the host in one batched vkQueueCommitDescriptorSetUpdatesGOOGLE
// call on |queue|. |context| is the VkEncoder to use. Pending writes are
// consumed (reset to Empty) and allocationPending is cleared on success.
void commitDescriptorSetUpdates(void* context, VkQueue queue,
                                const std::unordered_set<VkDescriptorSet>& sets) {
    VkEncoder* enc = (VkEncoder*)context;

    // poolSet maps each distinct pool to a dense index; the parallel arrays
    // below describe, per descriptor set, its pool, layout, host pool id,
    // whether it still needs allocation, and where its writes start.
    std::unordered_map<VkDescriptorPool, uint32_t> poolSet;
    std::vector<VkDescriptorPool> pools;
    std::vector<VkDescriptorSetLayout> setLayouts;
    std::vector<uint64_t> poolIds;
    std::vector<uint32_t> descriptorSetWhichPool;
    std::vector<uint32_t> pendingAllocations;
    std::vector<uint32_t> writeStartingIndices;
    std::vector<VkWriteDescriptorSet> writesForHost;

    uint32_t poolIndex = 0;
    uint32_t currentWriteIndex = 0;
    for (auto set : sets) {
        ReifiedDescriptorSet* reified = as_goldfish_VkDescriptorSet(set)->reified;
        VkDescriptorPool pool = reified->pool;
        VkDescriptorSetLayout setLayout = reified->setLayout;

        // Deduplicate pools; record which pool index this set belongs to.
        auto it = poolSet.find(pool);
        if (it == poolSet.end()) {
            poolSet[pool] = poolIndex;
            descriptorSetWhichPool.push_back(poolIndex);
            pools.push_back(pool);
            ++poolIndex;
        } else {
            uint32_t savedPoolIndex = it->second;
            descriptorSetWhichPool.push_back(savedPoolIndex);
        }

        poolIds.push_back(reified->poolId);
        setLayouts.push_back(setLayout);
        pendingAllocations.push_back(reified->allocationPending ? 1 : 0);
        writeStartingIndices.push_back(currentWriteIndex);

        // allWrites is indexed as [binding][array element].
        auto& writes = reified->allWrites;

        for (size_t i = 0; i < writes.size(); ++i) {
            uint32_t binding = i;

            for (size_t j = 0; j < writes[i].size(); ++j) {
                auto& write = writes[i][j];

                if (write.type == DescriptorWriteType::Empty) continue;

                uint32_t dstArrayElement = 0;

                VkDescriptorImageInfo* imageInfo = nullptr;
                VkDescriptorBufferInfo* bufferInfo = nullptr;
                VkBufferView* bufferView = nullptr;

                // Pick the payload pointer matching the write's type.
                switch (write.type) {
                    case DescriptorWriteType::Empty:
                        break;
                    case DescriptorWriteType::ImageInfo:
                        dstArrayElement = j;
                        imageInfo = &write.imageInfo;
                        break;
                    case DescriptorWriteType::BufferInfo:
                        dstArrayElement = j;
                        bufferInfo = &write.bufferInfo;
                        break;
                    case DescriptorWriteType::BufferView:
                        dstArrayElement = j;
                        bufferView = &write.bufferView;
                        break;
                    case DescriptorWriteType::InlineUniformBlock:
                    case DescriptorWriteType::AccelerationStructure:
                        // TODO
                        mesa_loge(
                            "Encountered pending inline uniform block or acceleration structure "
                            "desc write, abort (NYI)\n");
                        abort();
                    default:
                        break;
                }

                // TODO: Combine multiple writes into one VkWriteDescriptorSet.
                // Each pending entry becomes a single-descriptor write.
                VkWriteDescriptorSet forHost = {
                    VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                    0 /* TODO: inline uniform block */,
                    set,
                    binding,
                    dstArrayElement,
                    1,
                    write.descriptorType,
                    imageInfo,
                    bufferInfo,
                    bufferView,
                };

                writesForHost.push_back(forHost);
                ++currentWriteIndex;

                // Set it back to empty.
                write.type = DescriptorWriteType::Empty;
            }
        }
    }

    // Skip out if there's nothing to VkWriteDescriptorSet home about.
    if (writesForHost.empty()) {
        return;
    }

    enc->vkQueueCommitDescriptorSetUpdatesGOOGLE(
        queue, (uint32_t)pools.size(), pools.data(), (uint32_t)sets.size(), setLayouts.data(),
        poolIds.data(), descriptorSetWhichPool.data(), pendingAllocations.data(),
        writeStartingIndices.data(), (uint32_t)writesForHost.size(), writesForHost.data(),
        false /* no lock */);

    // If we got here, then we definitely serviced the allocations.
    for (auto set : sets) {
        ReifiedDescriptorSet* reified = as_goldfish_VkDescriptorSet(set)->reified;
        reified->allocationPending = false;
    }
}
928
// Hands a command buffer off from the encoder it last used to
// |currentEncoder|, issuing a host-sync pair so the host processes the old
// encoder's stream before the new one's. Always returns 0.
uint32_t ResourceTracker::syncEncodersForCommandBuffer(VkCommandBuffer commandBuffer,
                                                       VkEncoder* currentEncoder) {
    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (!cb) return 0;

    auto lastEncoder = cb->lastUsedEncoder;

    // Same encoder as last time: nothing to synchronize.
    if (lastEncoder == currentEncoder) return 0;

    currentEncoder->incRef();

    cb->lastUsedEncoder = currentEncoder;

    // First encoder to touch this command buffer: no handoff needed.
    if (!lastEncoder) return 0;

    // Advance the buffer's sequence number by two: the old encoder syncs at
    // oldSeq + 1 and is flushed, then the new encoder syncs at oldSeq + 2.
    auto oldSeq = cb->sequenceNumber;
    cb->sequenceNumber += 2;
    lastEncoder->vkCommandBufferHostSyncGOOGLE(commandBuffer, false, oldSeq + 1,
                                               true /* do lock */);
    lastEncoder->flush();
    currentEncoder->vkCommandBufferHostSyncGOOGLE(commandBuffer, true, oldSeq + 2,
                                                  true /* do lock */);

    // Drop the old encoder's reference; if decRef() reports it was the last
    // one, clear the stale pointer.
    if (lastEncoder->decRef()) {
        cb->lastUsedEncoder = nullptr;
    }
    return 0;
}
957
958void addPendingDescriptorSets(VkCommandBuffer commandBuffer, uint32_t descriptorSetCount,
959 const VkDescriptorSet* pDescriptorSets) {
960 struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
961
962 if (!cb->userPtr) {
963 CommandBufferPendingDescriptorSets* newPendingSets = new CommandBufferPendingDescriptorSets;
964 cb->userPtr = newPendingSets;
965 }
966
967 CommandBufferPendingDescriptorSets* pendingSets =
968 (CommandBufferPendingDescriptorSets*)cb->userPtr;
969
970 for (uint32_t i = 0; i < descriptorSetCount; ++i) {
971 pendingSets->sets.insert(pDescriptorSets[i]);
972 }
973}
974
975void decDescriptorSetLayoutRef(void* context, VkDevice device,
976 VkDescriptorSetLayout descriptorSetLayout,
977 const VkAllocationCallbacks* pAllocator) {
978 if (!descriptorSetLayout) return;
979
980 struct goldfish_VkDescriptorSetLayout* setLayout =
981 as_goldfish_VkDescriptorSetLayout(descriptorSetLayout);
982
983 if (0 == --setLayout->layoutInfo->refcount) {
Yilong Lie12328f2022-01-06 03:32:13 -0800984 VkEncoder* enc = (VkEncoder*)context;
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700985 enc->vkDestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator,
986 true /* do lock */);
987 }
988}
Yilong Lie12328f2022-01-06 03:32:13 -0800989
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700990void ResourceTracker::ensureSyncDeviceFd() {
Gurchetan Singhb7feebd2024-01-23 14:12:36 -0800991#if GFXSTREAM_ENABLE_GUEST_GOLDFISH
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700992 if (mSyncDeviceFd >= 0) return;
993 mSyncDeviceFd = goldfish_sync_open();
994 if (mSyncDeviceFd >= 0) {
Gurchetan Singh42361f72024-05-16 17:37:11 -0700995 mesa_logd("%s: created sync device for current Vulkan process: %d\n", __func__, mSyncDeviceFd);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700996 } else {
Gurchetan Singh42361f72024-05-16 17:37:11 -0700997 mesa_logd("%s: failed to create sync device for current Vulkan process\n", __func__);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -0700998 }
999#endif
1000}
Yilong Lie12328f2022-01-06 03:32:13 -08001001
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07001002void ResourceTracker::unregister_VkInstance(VkInstance instance) {
1003 AutoLock<RecursiveLock> lock(mLock);
Yilong Lie12328f2022-01-06 03:32:13 -08001004
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07001005 auto it = info_VkInstance.find(instance);
1006 if (it == info_VkInstance.end()) return;
1007 auto info = it->second;
1008 info_VkInstance.erase(instance);
1009 lock.unlock();
1010}
Yilong Lie12328f2022-01-06 03:32:13 -08001011
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07001012void ResourceTracker::unregister_VkDevice(VkDevice device) {
1013 AutoLock<RecursiveLock> lock(mLock);
Yilong Lie12328f2022-01-06 03:32:13 -08001014
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07001015 auto it = info_VkDevice.find(device);
1016 if (it == info_VkDevice.end()) return;
1017 auto info = it->second;
1018 info_VkDevice.erase(device);
1019 lock.unlock();
1020}
Yilong Lie12328f2022-01-06 03:32:13 -08001021
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07001022void ResourceTracker::unregister_VkCommandPool(VkCommandPool pool) {
1023 if (!pool) return;
Yilong Lie12328f2022-01-06 03:32:13 -08001024
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07001025 clearCommandPool(pool);
Yilong Lie12328f2022-01-06 03:32:13 -08001026
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07001027 AutoLock<RecursiveLock> lock(mLock);
1028 info_VkCommandPool.erase(pool);
1029}
Yilong Lie12328f2022-01-06 03:32:13 -08001030
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07001031void ResourceTracker::unregister_VkSampler(VkSampler sampler) {
1032 if (!sampler) return;
Yilong Lie12328f2022-01-06 03:32:13 -08001033
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07001034 AutoLock<RecursiveLock> lock(mLock);
1035 info_VkSampler.erase(sampler);
1036}
Yilong Lie12328f2022-01-06 03:32:13 -08001037
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07001038void ResourceTracker::unregister_VkCommandBuffer(VkCommandBuffer commandBuffer) {
1039 resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */,
1040 true /* also clear pending descriptor sets */);
Yilong Lie12328f2022-01-06 03:32:13 -08001041
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07001042 struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
1043 if (!cb) return;
1044 if (cb->lastUsedEncoder) {
1045 cb->lastUsedEncoder->decRef();
1046 }
1047 eraseObjects(&cb->subObjects);
1048 forAllObjects(cb->poolObjects, [cb](void* commandPool) {
1049 struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool((VkCommandPool)commandPool);
1050 eraseObject(&p->subObjects, (void*)cb);
1051 });
1052 eraseObjects(&cb->poolObjects);
Yilong Lie12328f2022-01-06 03:32:13 -08001053
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07001054 if (cb->userPtr) {
1055 CommandBufferPendingDescriptorSets* pendingSets =
1056 (CommandBufferPendingDescriptorSets*)cb->userPtr;
1057 delete pendingSets;
1058 }
Yilong Lie12328f2022-01-06 03:32:13 -08001059
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07001060 AutoLock<RecursiveLock> lock(mLock);
1061 info_VkCommandBuffer.erase(commandBuffer);
1062}
Yilong Lie12328f2022-01-06 03:32:13 -08001063
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07001064void ResourceTracker::unregister_VkQueue(VkQueue queue) {
1065 struct goldfish_VkQueue* q = as_goldfish_VkQueue(queue);
1066 if (!q) return;
1067 if (q->lastUsedEncoder) {
1068 q->lastUsedEncoder->decRef();
1069 }
Yilong Lie12328f2022-01-06 03:32:13 -08001070
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07001071 AutoLock<RecursiveLock> lock(mLock);
1072 info_VkQueue.erase(queue);
1073}
Yilong Lie12328f2022-01-06 03:32:13 -08001074
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07001075void ResourceTracker::unregister_VkDeviceMemory(VkDeviceMemory mem) {
1076 AutoLock<RecursiveLock> lock(mLock);
1077
1078 auto it = info_VkDeviceMemory.find(mem);
1079 if (it == info_VkDeviceMemory.end()) return;
1080
1081 auto& memInfo = it->second;
1082
1083#ifdef VK_USE_PLATFORM_ANDROID_KHR
1084 if (memInfo.ahw) {
1085 auto* gralloc =
1086 ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper();
1087 gralloc->release(memInfo.ahw);
Yilong Lie12328f2022-01-06 03:32:13 -08001088 }
David Reveman5b7c5842019-02-20 01:06:48 -05001089#endif
Lingfeng Yang9b82e332019-02-13 17:53:57 -08001090
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07001091 if (memInfo.vmoHandle != ZX_HANDLE_INVALID) {
1092 zx_handle_close(memInfo.vmoHandle);
1093 }
1094
1095 info_VkDeviceMemory.erase(mem);
1096}
1097
1098void ResourceTracker::unregister_VkImage(VkImage img) {
1099 AutoLock<RecursiveLock> lock(mLock);
1100
1101 auto it = info_VkImage.find(img);
1102 if (it == info_VkImage.end()) return;
1103
1104 auto& imageInfo = it->second;
1105
1106 info_VkImage.erase(img);
1107}
1108
1109void ResourceTracker::unregister_VkBuffer(VkBuffer buf) {
1110 AutoLock<RecursiveLock> lock(mLock);
1111
1112 auto it = info_VkBuffer.find(buf);
1113 if (it == info_VkBuffer.end()) return;
1114
1115 info_VkBuffer.erase(buf);
1116}
1117
void ResourceTracker::unregister_VkSemaphore(VkSemaphore sem) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkSemaphore.find(sem);
    if (it == info_VkSemaphore.end()) return;

    auto& semInfo = it->second;

    // Close the zircon event backing an external semaphore, if present.
    if (semInfo.eventHandle != ZX_HANDLE_INVALID) {
        zx_handle_close(semInfo.eventHandle);
    }

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    // Close any sync fd recorded for this semaphore.
    if (semInfo.syncFd.value_or(-1) >= 0) {
        auto* syncHelper =
            ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
        syncHelper->close(semInfo.syncFd.value());
    }
#endif

    info_VkSemaphore.erase(sem);
}
1140
void ResourceTracker::unregister_VkDescriptorUpdateTemplate(VkDescriptorUpdateTemplate templ) {
    AutoLock<RecursiveLock> lock(mLock);
    auto it = info_VkDescriptorUpdateTemplate.find(templ);
    if (it == info_VkDescriptorUpdateTemplate.end()) return;

    // Free the arrays allocated at registration time; each nonzero count
    // indicates the corresponding array pair was allocated.
    auto& info = it->second;
    if (info.templateEntryCount) delete[] info.templateEntries;
    if (info.imageInfoCount) {
        delete[] info.imageInfoIndices;
        delete[] info.imageInfos;
    }
    if (info.bufferInfoCount) {
        delete[] info.bufferInfoIndices;
        delete[] info.bufferInfos;
    }
    if (info.bufferViewCount) {
        delete[] info.bufferViewIndices;
        delete[] info.bufferViews;
    }
    info_VkDescriptorUpdateTemplate.erase(it);
}
1162
1163void ResourceTracker::unregister_VkFence(VkFence fence) {
1164 AutoLock<RecursiveLock> lock(mLock);
1165 auto it = info_VkFence.find(fence);
1166 if (it == info_VkFence.end()) return;
1167
1168 auto& fenceInfo = it->second;
1169 (void)fenceInfo;
1170
1171#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
1172 if (fenceInfo.syncFd >= 0) {
1173 auto* syncHelper =
1174 ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
1175 syncHelper->close(fenceInfo.syncFd);
1176 }
1177#endif
1178
1179 info_VkFence.erase(fence);
1180}
1181
#ifdef VK_USE_PLATFORM_FUCHSIA
void ResourceTracker::unregister_VkBufferCollectionFUCHSIA(VkBufferCollectionFUCHSIA collection) {
    // Only tracker-side bookkeeping to drop; the FIDL channel lives elsewhere.
    AutoLock<RecursiveLock> guard(mLock);
    info_VkBufferCollectionFUCHSIA.erase(collection);
}
#endif
1188
Jason Macnakeea882a2023-11-15 17:29:11 -08001189void ResourceTracker::unregister_VkDescriptorSet_locked(VkDescriptorSet set) {
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07001190 struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(set);
1191 delete ds->reified;
1192 info_VkDescriptorSet.erase(set);
1193}
1194
1195void ResourceTracker::unregister_VkDescriptorSet(VkDescriptorSet set) {
1196 if (!set) return;
1197
1198 AutoLock<RecursiveLock> lock(mLock);
1199 unregister_VkDescriptorSet_locked(set);
1200}
1201
1202void ResourceTracker::unregister_VkDescriptorSetLayout(VkDescriptorSetLayout setLayout) {
1203 if (!setLayout) return;
1204
1205 AutoLock<RecursiveLock> lock(mLock);
1206 delete as_goldfish_VkDescriptorSetLayout(setLayout)->layoutInfo;
1207 info_VkDescriptorSetLayout.erase(setLayout);
1208}
1209
1210void ResourceTracker::freeDescriptorSetsIfHostAllocated(VkEncoder* enc, VkDevice device,
1211 uint32_t descriptorSetCount,
1212 const VkDescriptorSet* sets) {
1213 for (uint32_t i = 0; i < descriptorSetCount; ++i) {
1214 struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(sets[i]);
1215 if (ds->reified->allocationPending) {
1216 unregister_VkDescriptorSet(sets[i]);
1217 delete_goldfish_VkDescriptorSet(sets[i]);
1218 } else {
1219 enc->vkFreeDescriptorSets(device, ds->reified->pool, 1, &sets[i], false /* no lock */);
1220 }
1221 }
1222}
1223
1224void ResourceTracker::clearDescriptorPoolAndUnregisterDescriptorSets(void* context, VkDevice device,
1225 VkDescriptorPool pool) {
1226 std::vector<VkDescriptorSet> toClear =
1227 clearDescriptorPool(pool, mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate);
1228
1229 for (auto set : toClear) {
1230 if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
1231 VkDescriptorSetLayout setLayout = as_goldfish_VkDescriptorSet(set)->reified->setLayout;
1232 decDescriptorSetLayoutRef(context, device, setLayout, nullptr);
1233 }
1234 unregister_VkDescriptorSet(set);
1235 delete_goldfish_VkDescriptorSet(set);
1236 }
1237}
1238
1239void ResourceTracker::unregister_VkDescriptorPool(VkDescriptorPool pool) {
1240 if (!pool) return;
1241
1242 AutoLock<RecursiveLock> lock(mLock);
1243
1244 struct goldfish_VkDescriptorPool* dp = as_goldfish_VkDescriptorPool(pool);
1245 delete dp->allocInfo;
1246
1247 info_VkDescriptorPool.erase(pool);
1248}
1249
// Intentionally a no-op: host-to-guest device memory handles need no rewrite
// in this configuration. The casts below only silence "unused parameter"
// warnings.
void ResourceTracker::deviceMemoryTransform_fromhost(VkDeviceMemory* memory, uint32_t memoryCount,
                                                     VkDeviceSize* offset, uint32_t offsetCount,
                                                     VkDeviceSize* size, uint32_t sizeCount,
                                                     uint32_t* typeIndex, uint32_t typeIndexCount,
                                                     uint32_t* typeBits, uint32_t typeBitsCount) {
    (void)memory;
    (void)memoryCount;
    (void)offset;
    (void)offsetCount;
    (void)size;
    (void)sizeCount;
    (void)typeIndex;
    (void)typeIndexCount;
    (void)typeBits;
    (void)typeBitsCount;
}
1266
1267void ResourceTracker::transformImpl_VkExternalMemoryProperties_fromhost(
1268 VkExternalMemoryProperties* pProperties, uint32_t) {
1269 VkExternalMemoryHandleTypeFlags supportedHandleType = 0u;
1270#ifdef VK_USE_PLATFORM_FUCHSIA
1271 supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
1272#endif // VK_USE_PLATFORM_FUCHSIA
1273#ifdef VK_USE_PLATFORM_ANDROID_KHR
1274 supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
1275 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
1276#endif // VK_USE_PLATFORM_ANDROID_KHR
1277 if (supportedHandleType) {
1278 pProperties->compatibleHandleTypes &= supportedHandleType;
1279 pProperties->exportFromImportedHandleTypes &= supportedHandleType;
1280 }
1281}
1282
1283void ResourceTracker::setInstanceInfo(VkInstance instance, uint32_t enabledExtensionCount,
1284 const char* const* ppEnabledExtensionNames,
1285 uint32_t apiVersion) {
1286 AutoLock<RecursiveLock> lock(mLock);
1287 auto& info = info_VkInstance[instance];
1288 info.highestApiVersion = apiVersion;
1289
1290 if (!ppEnabledExtensionNames) return;
1291
1292 for (uint32_t i = 0; i < enabledExtensionCount; ++i) {
1293 info.enabledExtensions.insert(ppEnabledExtensionNames[i]);
1294 }
1295}
1296
// Records per-device state (physical device, properties, memory properties,
// enabled extensions) and registers any VK_EXT_device_memory_report
// callbacks found in the device-create pNext chain.
void ResourceTracker::setDeviceInfo(VkDevice device, VkPhysicalDevice physdev,
                                    VkPhysicalDeviceProperties props,
                                    VkPhysicalDeviceMemoryProperties memProps,
                                    uint32_t enabledExtensionCount,
                                    const char* const* ppEnabledExtensionNames, const void* pNext) {
    AutoLock<RecursiveLock> lock(mLock);
    auto& info = info_VkDevice[device];
    info.physdev = physdev;
    info.props = props;
    info.memProps = memProps;
    info.apiVersion = props.apiVersion;

    // Walk the pNext chain for device memory report create-info structs.
    const VkBaseInStructure* extensionCreateInfo =
        reinterpret_cast<const VkBaseInStructure*>(pNext);
    while (extensionCreateInfo) {
        if (extensionCreateInfo->sType ==
            VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT) {
            auto deviceMemoryReportCreateInfo =
                reinterpret_cast<const VkDeviceDeviceMemoryReportCreateInfoEXT*>(
                    extensionCreateInfo);
            if (deviceMemoryReportCreateInfo->pfnUserCallback != nullptr) {
                info.deviceMemoryReportCallbacks.emplace_back(
                    deviceMemoryReportCreateInfo->pfnUserCallback,
                    deviceMemoryReportCreateInfo->pUserData);
            }
        }
        extensionCreateInfo = extensionCreateInfo->pNext;
    }

    if (!ppEnabledExtensionNames) return;

    // Remember which device extensions the app enabled.
    for (uint32_t i = 0; i < enabledExtensionCount; ++i) {
        info.enabledExtensions.insert(ppEnabledExtensionNames[i]);
    }
}
1332
1333void ResourceTracker::setDeviceMemoryInfo(VkDevice device, VkDeviceMemory memory,
1334 VkDeviceSize allocationSize, uint8_t* ptr,
1335 uint32_t memoryTypeIndex, AHardwareBuffer* ahw,
Gurchetan Singhc4444b82023-09-19 08:06:20 -07001336 bool imported, zx_handle_t vmoHandle,
Jason Macnak6d3d7b22024-04-01 16:48:53 -07001337 VirtGpuResourcePtr blobPtr) {
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07001338 AutoLock<RecursiveLock> lock(mLock);
1339 auto& info = info_VkDeviceMemory[memory];
1340
1341 info.device = device;
1342 info.allocationSize = allocationSize;
1343 info.ptr = ptr;
1344 info.memoryTypeIndex = memoryTypeIndex;
1345#ifdef VK_USE_PLATFORM_ANDROID_KHR
1346 info.ahw = ahw;
1347#endif
1348 info.imported = imported;
1349 info.vmoHandle = vmoHandle;
Gurchetan Singhc4444b82023-09-19 08:06:20 -07001350 info.blobPtr = blobPtr;
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07001351}
1352
1353void ResourceTracker::setImageInfo(VkImage image, VkDevice device,
1354 const VkImageCreateInfo* pCreateInfo) {
1355 AutoLock<RecursiveLock> lock(mLock);
1356 auto& info = info_VkImage[image];
1357
1358 info.device = device;
1359 info.createInfo = *pCreateInfo;
1360}
1361
1362uint8_t* ResourceTracker::getMappedPointer(VkDeviceMemory memory) {
1363 AutoLock<RecursiveLock> lock(mLock);
1364 const auto it = info_VkDeviceMemory.find(memory);
1365 if (it == info_VkDeviceMemory.end()) return nullptr;
1366
1367 const auto& info = it->second;
1368 return info.ptr;
1369}
1370
1371VkDeviceSize ResourceTracker::getMappedSize(VkDeviceMemory memory) {
1372 AutoLock<RecursiveLock> lock(mLock);
1373 const auto it = info_VkDeviceMemory.find(memory);
1374 if (it == info_VkDeviceMemory.end()) return 0;
1375
1376 const auto& info = it->second;
1377 return info.allocationSize;
1378}
1379
1380bool ResourceTracker::isValidMemoryRange(const VkMappedMemoryRange& range) const {
1381 AutoLock<RecursiveLock> lock(mLock);
1382 const auto it = info_VkDeviceMemory.find(range.memory);
1383 if (it == info_VkDeviceMemory.end()) return false;
1384 const auto& info = it->second;
1385
1386 if (!info.ptr) return false;
1387
1388 VkDeviceSize offset = range.offset;
1389 VkDeviceSize size = range.size;
1390
1391 if (size == VK_WHOLE_SIZE) {
1392 return offset <= info.allocationSize;
1393 }
1394
1395 return offset + size <= info.allocationSize;
1396}
1397
// Queries the virtio-gpu Vulkan capset and derives the feature set from it.
// Outputs via |noRenderControlEnc| whether the render control encoder is
// not needed on this configuration.
void ResourceTracker::setupCaps(uint32_t& noRenderControlEnc) {
    VirtGpuDevice* instance = VirtGpuDevice::getInstance(kCapsetGfxStreamVulkan);
    mCaps = instance->getCaps();

    // Delete once goldfish Linux drivers are gone
    if (mCaps.vulkanCapset.protocolVersion == 0) {
        mCaps.vulkanCapset.colorBufferMemoryIndex = 0xFFFFFFFF;
    } else {
        // Don't query the render control encoder for features, since for virtio-gpu the
        // capabilities provide versioning. Set features to be unconditionally true, since
        // using virtio-gpu encompasses all prior goldfish features. mFeatureInfo should be
        // deprecated in favor of caps.

        mFeatureInfo.reset(new EmulatorFeatureInfo);

        mFeatureInfo->hasVulkanNullOptionalStrings = true;
        mFeatureInfo->hasVulkanIgnoredHandles = true;
        mFeatureInfo->hasVulkanShaderFloat16Int8 = true;
        mFeatureInfo->hasVulkanQueueSubmitWithCommands = true;
        mFeatureInfo->hasDeferredVulkanCommands = true;
        mFeatureInfo->hasVulkanAsyncQueueSubmit = true;
        mFeatureInfo->hasVulkanCreateResourcesWithRequirements = true;
        mFeatureInfo->hasVirtioGpuNext = true;
        mFeatureInfo->hasVirtioGpuNativeSync = true;
        mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate = true;
        mFeatureInfo->hasVulkanAsyncQsri = true;

        // Mirror the feature flags into the wire-protocol stream feature bits.
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT;
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT;
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_SHADER_FLOAT16_INT8_BIT;
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
    }

    noRenderControlEnc = mCaps.vulkanCapset.noRenderControlEnc;
}
1433
// Copies the emulator feature set into the tracker (first call wins; a no-op
// if mFeatureInfo was already populated, e.g. by setupCaps()). Performs
// platform-specific one-time setup that depends on those features:
// - Android: creates the goldfish address space block provider for direct mem.
// - Fuchsia: connects to the goldfish control device and sysmem allocator.
// Finally mirrors the relevant feature flags into the stream feature bits.
void ResourceTracker::setupFeatures(const EmulatorFeatureInfo* features) {
    if (!features || mFeatureInfo) return;
    mFeatureInfo.reset(new EmulatorFeatureInfo);
    *mFeatureInfo = *features;

#if defined(__ANDROID__)
    if (mFeatureInfo->hasDirectMem) {
        mGoldfishAddressSpaceBlockProvider.reset(
            new GoldfishAddressSpaceBlockProvider(GoldfishAddressSpaceSubdeviceType::NoSubdevice));
    }
#endif  // defined(__ANDROID__)

#ifdef VK_USE_PLATFORM_FUCHSIA
    if (mFeatureInfo->hasVulkan) {
        fidl::ClientEnd<fuchsia_hardware_goldfish::ControlDevice> channel{zx::channel(
            GetConnectToServiceFunction()("/loader-gpu-devices/class/goldfish-control/000"))};
        // The control device is required for Vulkan on Fuchsia; bail hard.
        if (!channel) {
            mesa_loge("failed to open control device");
            abort();
        }
        mControlDevice =
            fidl::WireSyncClient<fuchsia_hardware_goldfish::ControlDevice>(std::move(channel));

        fidl::ClientEnd<fuchsia_sysmem::Allocator> sysmem_channel{
            zx::channel(GetConnectToServiceFunction()("/svc/fuchsia.sysmem.Allocator"))};
        // NOTE(review): unlike the control device, a missing sysmem connection
        // is only logged and the (invalid) channel is still wrapped — confirm
        // downstream code tolerates an unbound allocator.
        if (!sysmem_channel) {
            mesa_loge("failed to open sysmem connection");
        }
        mSysmemAllocator =
            fidl::WireSyncClient<fuchsia_sysmem::Allocator>(std::move(sysmem_channel));
        // Tag sysmem allocations with "<process name>-goldfish" for debugging.
        char name[ZX_MAX_NAME_LEN] = {};
        zx_object_get_property(zx_process_self(), ZX_PROP_NAME, name, sizeof(name));
        std::string client_name(name);
        client_name += "-goldfish";
        zx_info_handle_basic_t info;
        zx_object_get_info(zx_process_self(), ZX_INFO_HANDLE_BASIC, &info, sizeof(info), nullptr,
                           nullptr);
        mSysmemAllocator->SetDebugClientInfo(fidl::StringView::FromExternal(client_name),
                                             info.koid);
    }
#endif

    // Translate feature flags into wire-protocol stream feature bits.
    if (mFeatureInfo->hasVulkanNullOptionalStrings) {
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT;
    }
    if (mFeatureInfo->hasVulkanIgnoredHandles) {
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT;
    }
    if (mFeatureInfo->hasVulkanShaderFloat16Int8) {
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_SHADER_FLOAT16_INT8_BIT;
    }
    if (mFeatureInfo->hasVulkanQueueSubmitWithCommands) {
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
    }
}
1489
1490void ResourceTracker::setThreadingCallbacks(const ResourceTracker::ThreadingCallbacks& callbacks) {
1491 ResourceTracker::threadingCallbacks = callbacks;
1492}
1493
1494bool ResourceTracker::hostSupportsVulkan() const {
1495 if (!mFeatureInfo) return false;
1496
1497 return mFeatureInfo->hasVulkan;
1498}
1499
1500bool ResourceTracker::usingDirectMapping() const { return true; }
1501
1502uint32_t ResourceTracker::getStreamFeatures() const { return ResourceTracker::streamFeatureBits; }
1503
1504bool ResourceTracker::supportsDeferredCommands() const {
1505 if (!mFeatureInfo) return false;
1506 return mFeatureInfo->hasDeferredVulkanCommands;
1507}
1508
1509bool ResourceTracker::supportsAsyncQueueSubmit() const {
1510 if (!mFeatureInfo) return false;
1511 return mFeatureInfo->hasVulkanAsyncQueueSubmit;
1512}
1513
1514bool ResourceTracker::supportsCreateResourcesWithRequirements() const {
1515 if (!mFeatureInfo) return false;
1516 return mFeatureInfo->hasVulkanCreateResourcesWithRequirements;
1517}
1518
1519int ResourceTracker::getHostInstanceExtensionIndex(const std::string& extName) const {
1520 int i = 0;
1521 for (const auto& prop : mHostInstanceExtensions) {
1522 if (extName == std::string(prop.extensionName)) {
1523 return i;
1524 }
1525 ++i;
1526 }
1527 return -1;
1528}
1529
1530int ResourceTracker::getHostDeviceExtensionIndex(const std::string& extName) const {
1531 int i = 0;
1532 for (const auto& prop : mHostDeviceExtensions) {
1533 if (extName == std::string(prop.extensionName)) {
1534 return i;
1535 }
1536 ++i;
1537 }
1538 return -1;
1539}
1540
// Rewrites guest-side VkDeviceMemory handles, offsets and sizes into the
// host-side values backing them, in place, prior to encoding a call to the
// host. Entries not backed by coherent memory are left untouched. The type
// index/bits arrays are accepted for interface symmetry but are ignored.
void ResourceTracker::deviceMemoryTransform_tohost(VkDeviceMemory* memory, uint32_t memoryCount,
                                                   VkDeviceSize* offset, uint32_t offsetCount,
                                                   VkDeviceSize* size, uint32_t sizeCount,
                                                   uint32_t* typeIndex, uint32_t typeIndexCount,
                                                   uint32_t* typeBits, uint32_t typeBitsCount) {
    (void)memoryCount;
    (void)offsetCount;
    (void)sizeCount;
    (void)typeIndex;
    (void)typeIndexCount;
    (void)typeBits;
    (void)typeBitsCount;

    if (memory) {
        AutoLock<RecursiveLock> lock(mLock);

        for (uint32_t i = 0; i < memoryCount; ++i) {
            VkDeviceMemory mem = memory[i];

            auto it = info_VkDeviceMemory.find(mem);
            // NOTE(review): an unknown handle aborts the remaining elements
            // (return, not continue) — presumably intentional; confirm.
            if (it == info_VkDeviceMemory.end()) return;

            const auto& info = it->second;

            // No coherent backing: the guest handle already is the host handle.
            if (!info.coherentMemory) continue;

            // Substitute the host memory handle and shift the offset by where
            // this sub-allocation lives inside the coherent block.
            memory[i] = info.coherentMemory->getDeviceMemory();

            if (offset) {
                offset[i] = info.coherentMemoryOffset + offset[i];
            }

            // VK_WHOLE_SIZE must be resolved to a concrete size for the host.
            if (size && size[i] == VK_WHOLE_SIZE) {
                size[i] = info.allocationSize;
            }

            // TODO
            (void)memory;
            (void)offset;
            (void)size;
        }
    }
}
1584
// Determines the memory type index used for color buffer allocations by
// creating a small throwaway 2D image, reading its memory requirements, and
// selecting the highest DEVICE_LOCAL memory type index that satisfies them
// (mirroring the host's own "last matching index" selection). Returns 0 if
// the probe image cannot be created or no matching type is found.
uint32_t ResourceTracker::getColorBufferMemoryIndex(void* context, VkDevice device) {
    // Create test image to get the memory requirements
    VkEncoder* enc = (VkEncoder*)context;
    VkImageCreateInfo createInfo = {
        .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
        .imageType = VK_IMAGE_TYPE_2D,
        .format = VK_FORMAT_R8G8B8A8_UNORM,
        .extent = {64, 64, 1},
        .mipLevels = 1,
        .arrayLayers = 1,
        .samples = VK_SAMPLE_COUNT_1_BIT,
        .tiling = VK_IMAGE_TILING_OPTIMAL,
        .usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
                 VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                 VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT,
        .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
    };
    VkImage image = VK_NULL_HANDLE;
    VkResult res = enc->vkCreateImage(device, &createInfo, nullptr, &image, true /* do lock */);

    if (res != VK_SUCCESS) {
        return 0;
    }

    VkMemoryRequirements memReqs;
    enc->vkGetImageMemoryRequirements(device, image, &memReqs, true /* do lock */);
    enc->vkDestroyImage(device, image, nullptr, true /* do lock */);

    const VkPhysicalDeviceMemoryProperties& memProps =
        getPhysicalDeviceMemoryProperties(context, device, VK_NULL_HANDLE);

    // Currently, host looks for the last index that has the memory
    // property type VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT
    VkMemoryPropertyFlags memoryProperty = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    for (int i = VK_MAX_MEMORY_TYPES - 1; i >= 0; --i) {
        if ((memReqs.memoryTypeBits & (1u << i)) &&
            (memProps.memoryTypes[i].propertyFlags & memoryProperty)) {
            return i;
        }
    }

    return 0;
}
1628
// Implements vkEnumerateInstanceExtensionProperties for the guest.
// Host instance extensions are fetched once and cached in
// mHostInstanceExtensions; only an allow-listed subset is advertised, plus
// platform-specific additions. The count/array handshake then follows the
// Vulkan spec (VK_INCOMPLETE when the caller's array is too small).
VkResult ResourceTracker::on_vkEnumerateInstanceExtensionProperties(
    void* context, VkResult, const char*, uint32_t* pPropertyCount,
    VkExtensionProperties* pProperties) {
    std::vector<const char*> allowedExtensionNames = {
        "VK_KHR_get_physical_device_properties2",
        "VK_KHR_sampler_ycbcr_conversion",
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
        "VK_KHR_external_semaphore_capabilities",
        "VK_KHR_external_memory_capabilities",
        "VK_KHR_external_fence_capabilities",
        "VK_EXT_debug_utils",
#endif
    };

    VkEncoder* enc = (VkEncoder*)context;

    // Only advertise a select set of extensions.
    if (mHostInstanceExtensions.empty()) {
        // Standard two-call pattern: count, then fill the cache.
        uint32_t hostPropCount = 0;
        enc->vkEnumerateInstanceExtensionProperties(nullptr, &hostPropCount, nullptr,
                                                    true /* do lock */);
        mHostInstanceExtensions.resize(hostPropCount);

        VkResult hostRes = enc->vkEnumerateInstanceExtensionProperties(
            nullptr, &hostPropCount, mHostInstanceExtensions.data(), true /* do lock */);

        if (hostRes != VK_SUCCESS) {
            return hostRes;
        }
    }

    std::vector<VkExtensionProperties> filteredExts;

    // Intersect the allow-list with what the host actually supports.
    for (size_t i = 0; i < allowedExtensionNames.size(); ++i) {
        auto extIndex = getHostInstanceExtensionIndex(allowedExtensionNames[i]);
        if (extIndex != -1) {
            filteredExts.push_back(mHostInstanceExtensions[extIndex]);
        }
    }

    // Platform-specific extensions advertised unconditionally.
    VkExtensionProperties anbExtProps[] = {
#ifdef VK_USE_PLATFORM_FUCHSIA
        {"VK_KHR_external_memory_capabilities", 1},
        {"VK_KHR_external_semaphore_capabilities", 1},
#endif
    };

    for (auto& anbExtProp : anbExtProps) {
        filteredExts.push_back(anbExtProp);
    }

    // Spec:
    //
    // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateInstanceExtensionProperties.html
    //
    // If pProperties is NULL, then the number of extensions properties
    // available is returned in pPropertyCount. Otherwise, pPropertyCount
    // must point to a variable set by the user to the number of elements
    // in the pProperties array, and on return the variable is overwritten
    // with the number of structures actually written to pProperties. If
    // pPropertyCount is less than the number of extension properties
    // available, at most pPropertyCount structures will be written. If
    // pPropertyCount is smaller than the number of extensions available,
    // VK_INCOMPLETE will be returned instead of VK_SUCCESS, to indicate
    // that not all the available properties were returned.
    //
    // pPropertyCount must be a valid pointer to a uint32_t value
    if (!pPropertyCount) return VK_ERROR_INITIALIZATION_FAILED;

    if (!pProperties) {
        *pPropertyCount = (uint32_t)filteredExts.size();
        return VK_SUCCESS;
    } else {
        auto actualExtensionCount = (uint32_t)filteredExts.size();
        if (*pPropertyCount > actualExtensionCount) {
            *pPropertyCount = actualExtensionCount;
        }

        for (uint32_t i = 0; i < *pPropertyCount; ++i) {
            pProperties[i] = filteredExts[i];
        }

        if (actualExtensionCount > *pPropertyCount) {
            return VK_INCOMPLETE;
        }

        return VK_SUCCESS;
    }
}
1718
// Implements vkEnumerateDeviceExtensionProperties for the guest.
// Host device extensions are fetched once and cached in mHostDeviceExtensions;
// an allow-listed subset is advertised, plus platform-specific and always-on
// additions. The list is de-duplicated (sort + unique) and the count/array
// handshake follows the Vulkan spec (VK_INCOMPLETE on short reads).
VkResult ResourceTracker::on_vkEnumerateDeviceExtensionProperties(
    void* context, VkResult, VkPhysicalDevice physdev, const char*, uint32_t* pPropertyCount,
    VkExtensionProperties* pProperties) {
    std::vector<const char*> allowedExtensionNames = {
        "VK_KHR_vulkan_memory_model",
        "VK_KHR_buffer_device_address",
        "VK_KHR_maintenance1",
        "VK_KHR_maintenance2",
        "VK_KHR_maintenance3",
        "VK_KHR_bind_memory2",
        "VK_KHR_dedicated_allocation",
        "VK_KHR_get_memory_requirements2",
        "VK_KHR_sampler_ycbcr_conversion",
        "VK_KHR_shader_float16_int8",
        // Timeline semaphores buggy in newer NVIDIA drivers
        // (vkWaitSemaphoresKHR causes further vkCommandBuffer dispatches to deadlock)
#ifndef VK_USE_PLATFORM_ANDROID_KHR
        "VK_KHR_timeline_semaphore",
#endif
        "VK_AMD_gpu_shader_half_float",
        "VK_NV_shader_subgroup_partitioned",
        "VK_KHR_shader_subgroup_extended_types",
        "VK_EXT_subgroup_size_control",
        "VK_EXT_provoking_vertex",
        "VK_EXT_line_rasterization",
        "VK_KHR_shader_terminate_invocation",
        "VK_EXT_transform_feedback",
        "VK_EXT_primitive_topology_list_restart",
        "VK_EXT_index_type_uint8",
        "VK_EXT_load_store_op_none",
        "VK_EXT_swapchain_colorspace",
        "VK_EXT_image_robustness",
        "VK_EXT_custom_border_color",
        "VK_EXT_shader_stencil_export",
        "VK_KHR_image_format_list",
        "VK_KHR_incremental_present",
        "VK_KHR_pipeline_executable_properties",
        "VK_EXT_queue_family_foreign",
        "VK_EXT_scalar_block_layout",
        "VK_KHR_descriptor_update_template",
        "VK_KHR_storage_buffer_storage_class",
        "VK_EXT_depth_clip_enable",
        "VK_KHR_create_renderpass2",
        "VK_EXT_vertex_attribute_divisor",
        "VK_EXT_host_query_reset",
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
        "VK_KHR_external_semaphore",
        "VK_KHR_external_semaphore_fd",
        // "VK_KHR_external_semaphore_win32", not exposed because it's translated to fd
        "VK_KHR_external_memory",
        "VK_KHR_external_fence",
        "VK_KHR_external_fence_fd",
        "VK_EXT_device_memory_report",
#endif
#if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
        "VK_KHR_imageless_framebuffer",
#endif
        // Vulkan 1.3
        "VK_KHR_synchronization2",
        "VK_EXT_private_data",
        "VK_EXT_color_write_enable",
    };

    VkEncoder* enc = (VkEncoder*)context;

    // Fetch and cache the host's device extension list on first use.
    if (mHostDeviceExtensions.empty()) {
        uint32_t hostPropCount = 0;
        enc->vkEnumerateDeviceExtensionProperties(physdev, nullptr, &hostPropCount, nullptr,
                                                  true /* do lock */);
        mHostDeviceExtensions.resize(hostPropCount);

        VkResult hostRes = enc->vkEnumerateDeviceExtensionProperties(
            physdev, nullptr, &hostPropCount, mHostDeviceExtensions.data(), true /* do lock */);

        if (hostRes != VK_SUCCESS) {
            return hostRes;
        }
    }

    std::vector<VkExtensionProperties> filteredExts;

    // Intersect the allow-list with what the host actually supports.
    for (size_t i = 0; i < allowedExtensionNames.size(); ++i) {
        auto extIndex = getHostDeviceExtensionIndex(allowedExtensionNames[i]);
        if (extIndex != -1) {
            filteredExts.push_back(mHostDeviceExtensions[extIndex]);
        }
    }

    // Platform-specific extensions advertised regardless of host support.
    VkExtensionProperties anbExtProps[] = {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        {"VK_ANDROID_native_buffer", 7},
#endif
#ifdef VK_USE_PLATFORM_FUCHSIA
        {"VK_KHR_external_memory", 1},
        {"VK_KHR_external_semaphore", 1},
        {"VK_FUCHSIA_external_semaphore", 1},
#endif
    };

    for (auto& anbExtProp : anbExtProps) {
        filteredExts.push_back(anbExtProp);
    }

    /*
     * GfxstreamEnd2EndVkTest::DeviceMemoryReport always assumes the memory report
     * extension is present. It is filtered out when sent host side, since for a
     * virtual GPU this is quite difficult to implement.
     *
     * Mesa runtime checks physical device features. So if the test tries to enable
     * device level extension without it definitely existing, the test will fail.
     *
     * The test can also be modified to check VkPhysicalDeviceDeviceMemoryReportFeaturesEXT,
     * but that's more involved. Work around this by always advertising the extension.
     * Tracking bug: b/338270042
     */
    filteredExts.push_back(VkExtensionProperties{"VK_EXT_device_memory_report", 1});

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    // External fence FDs are emulated guest-side when the host lacks them.
    bool hostSupportsExternalFenceFd =
        getHostDeviceExtensionIndex("VK_KHR_external_fence_fd") != -1;
    if (!hostSupportsExternalFenceFd) {
        filteredExts.push_back(VkExtensionProperties{"VK_KHR_external_fence_fd", 1});
    }
#endif

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    bool hostHasPosixExternalSemaphore =
        getHostDeviceExtensionIndex("VK_KHR_external_semaphore_fd") != -1;
    if (!hostHasPosixExternalSemaphore) {
        // Always advertise posix external semaphore capabilities on Android/Linux.
        // SYNC_FD handles will always work, regardless of host support. Support
        // for non-sync, opaque FDs, depends on host driver support, but will
        // be handled accordingly by host.
        filteredExts.push_back(VkExtensionProperties{"VK_KHR_external_semaphore_fd", 1});
    }
#endif

    // External memory is only advertised when the host has *some* form of
    // external memory support (any platform's flavor counts).
    bool win32ExtMemAvailable = getHostDeviceExtensionIndex("VK_KHR_external_memory_win32") != -1;
    bool posixExtMemAvailable = getHostDeviceExtensionIndex("VK_KHR_external_memory_fd") != -1;
    // TODO(b/349066492): this should check external_memory_metal extension when it's ready
    bool moltenVkExtAvailable = getHostDeviceExtensionIndex("VK_MVK_moltenvk") != -1;
    bool qnxExtMemAvailable =
        getHostDeviceExtensionIndex("VK_QNX_external_memory_screen_buffer") != -1;

    bool hostHasExternalMemorySupport =
        win32ExtMemAvailable || posixExtMemAvailable || moltenVkExtAvailable || qnxExtMemAvailable;

    if (hostHasExternalMemorySupport) {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        filteredExts.push_back(
            VkExtensionProperties{"VK_ANDROID_external_memory_android_hardware_buffer", 7});
        filteredExts.push_back(VkExtensionProperties{"VK_EXT_queue_family_foreign", 1});
#endif
#ifdef VK_USE_PLATFORM_FUCHSIA
        filteredExts.push_back(VkExtensionProperties{"VK_FUCHSIA_external_memory", 1});
        filteredExts.push_back(VkExtensionProperties{"VK_FUCHSIA_buffer_collection", 1});
#endif
#if !defined(VK_USE_PLATFORM_ANDROID_KHR) && defined(__linux__)
        filteredExts.push_back(VkExtensionProperties{"VK_KHR_external_memory_fd", 1});
        filteredExts.push_back(VkExtensionProperties{"VK_EXT_external_memory_dma_buf", 1});
#endif
    }

    // NOTE: the Vulkan Loader's trampoline functions will remove duplicates. This can
    // lead to errors if this function returns VK_SUCCESS with N elements (including a duplicate)
    // but the Vulkan Loader's trampoline function returns VK_INCOMPLETE with N-1 elements
    // (without the duplicate).
    std::sort(filteredExts.begin(),
              filteredExts.end(),
              [](const VkExtensionProperties& a,
                 const VkExtensionProperties& b) {
                  return strcmp(a.extensionName, b.extensionName) < 0;
              });
    filteredExts.erase(std::unique(filteredExts.begin(),
                                   filteredExts.end(),
                                   [](const VkExtensionProperties& a,
                                      const VkExtensionProperties& b) {
                                       return strcmp(a.extensionName, b.extensionName) == 0;
                                   }),
                       filteredExts.end());

    // Spec:
    //
    // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateDeviceExtensionProperties.html
    //
    // pPropertyCount is a pointer to an integer related to the number of
    // extension properties available or queried, and is treated in the
    // same fashion as the
    // vkEnumerateInstanceExtensionProperties::pPropertyCount parameter.
    //
    // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateInstanceExtensionProperties.html
    //
    // If pProperties is NULL, then the number of extensions properties
    // available is returned in pPropertyCount. Otherwise, pPropertyCount
    // must point to a variable set by the user to the number of elements
    // in the pProperties array, and on return the variable is overwritten
    // with the number of structures actually written to pProperties. If
    // pPropertyCount is less than the number of extension properties
    // available, at most pPropertyCount structures will be written. If
    // pPropertyCount is smaller than the number of extensions available,
    // VK_INCOMPLETE will be returned instead of VK_SUCCESS, to indicate
    // that not all the available properties were returned.
    //
    // pPropertyCount must be a valid pointer to a uint32_t value

    if (!pPropertyCount) return VK_ERROR_INITIALIZATION_FAILED;

    if (!pProperties) {
        *pPropertyCount = (uint32_t)filteredExts.size();
        return VK_SUCCESS;
    } else {
        auto actualExtensionCount = (uint32_t)filteredExts.size();
        if (*pPropertyCount > actualExtensionCount) {
            *pPropertyCount = actualExtensionCount;
        }

        for (uint32_t i = 0; i < *pPropertyCount; ++i) {
            pProperties[i] = filteredExts[i];
        }

        if (actualExtensionCount > *pPropertyCount) {
            return VK_INCOMPLETE;
        }

        return VK_SUCCESS;
    }
}
1946
// Implements vkEnumeratePhysicalDevices: on first use fetches and caches the
// full host physical device list in the per-instance info, then serves the
// guest's count/array query per the Vulkan spec (VK_INCOMPLETE on short reads).
VkResult ResourceTracker::on_vkEnumeratePhysicalDevices(void* context, VkResult,
                                                        VkInstance instance,
                                                        uint32_t* pPhysicalDeviceCount,
                                                        VkPhysicalDevice* pPhysicalDevices) {
    VkEncoder* enc = (VkEncoder*)context;

    if (!instance) return VK_ERROR_INITIALIZATION_FAILED;

    if (!pPhysicalDeviceCount) return VK_ERROR_INITIALIZATION_FAILED;

    AutoLock<RecursiveLock> lock(mLock);

    // When this function is called, we actually need to do two things:
    // - Get full information about physical devices from the host,
    // even if the guest did not ask for it
    // - Serve the guest query according to the spec:
    //
    // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumeratePhysicalDevices.html

    auto it = info_VkInstance.find(instance);

    if (it == info_VkInstance.end()) return VK_ERROR_INITIALIZATION_FAILED;

    auto& info = it->second;

    // Get the full host information here if it doesn't exist already.
    if (info.physicalDevices.empty()) {
        uint32_t hostPhysicalDeviceCount = 0;

        // Release the tracker lock across host round trips.
        lock.unlock();
        VkResult countRes = enc->vkEnumeratePhysicalDevices(instance, &hostPhysicalDeviceCount,
                                                            nullptr, false /* no lock */);
        lock.lock();

        if (countRes != VK_SUCCESS) {
            mesa_loge(
                "%s: failed: could not count host physical devices. "
                "Error %d\n",
                __func__, countRes);
            return countRes;
        }

        info.physicalDevices.resize(hostPhysicalDeviceCount);

        lock.unlock();
        VkResult enumRes = enc->vkEnumeratePhysicalDevices(
            instance, &hostPhysicalDeviceCount, info.physicalDevices.data(), false /* no lock */);
        lock.lock();

        if (enumRes != VK_SUCCESS) {
            mesa_loge(
                "%s: failed: could not retrieve host physical devices. "
                "Error %d\n",
                __func__, enumRes);
            return enumRes;
        }
    }

    // Serve the guest query according to the spec.
    //
    // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumeratePhysicalDevices.html
    //
    // If pPhysicalDevices is NULL, then the number of physical devices
    // available is returned in pPhysicalDeviceCount. Otherwise,
    // pPhysicalDeviceCount must point to a variable set by the user to the
    // number of elements in the pPhysicalDevices array, and on return the
    // variable is overwritten with the number of handles actually written
    // to pPhysicalDevices. If pPhysicalDeviceCount is less than the number
    // of physical devices available, at most pPhysicalDeviceCount
    // structures will be written. If pPhysicalDeviceCount is smaller than
    // the number of physical devices available, VK_INCOMPLETE will be
    // returned instead of VK_SUCCESS, to indicate that not all the
    // available physical devices were returned.

    if (!pPhysicalDevices) {
        *pPhysicalDeviceCount = (uint32_t)info.physicalDevices.size();
        return VK_SUCCESS;
    } else {
        uint32_t actualDeviceCount = (uint32_t)info.physicalDevices.size();
        uint32_t toWrite =
            actualDeviceCount < *pPhysicalDeviceCount ? actualDeviceCount : *pPhysicalDeviceCount;

        for (uint32_t i = 0; i < toWrite; ++i) {
            pPhysicalDevices[i] = info.physicalDevices[i];
        }

        *pPhysicalDeviceCount = toWrite;

        if (actualDeviceCount > *pPhysicalDeviceCount) {
            return VK_INCOMPLETE;
        }

        return VK_SUCCESS;
    }
}
2042
2043void ResourceTracker::on_vkGetPhysicalDeviceProperties(void*, VkPhysicalDevice,
Gurchetan Singhc4444b82023-09-19 08:06:20 -07002044 VkPhysicalDeviceProperties* pProperties) {
2045#if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
2046 if (pProperties) {
2047 if (VK_PHYSICAL_DEVICE_TYPE_CPU == pProperties->deviceType) {
2048 /* For Linux guest: Even if host driver reports DEVICE_TYPE_CPU,
2049 * override this to VIRTUAL_GPU, otherwise Linux DRM interfaces
2050 * will take unexpected code paths to deal with "software" driver
2051 */
2052 pProperties->deviceType = VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU;
2053 }
2054 }
2055#endif
2056}
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07002057
2058void ResourceTracker::on_vkGetPhysicalDeviceFeatures2(void*, VkPhysicalDevice,
2059 VkPhysicalDeviceFeatures2* pFeatures) {
2060 if (pFeatures) {
2061 VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* memoryReportFeaturesEXT =
2062 vk_find_struct<VkPhysicalDeviceDeviceMemoryReportFeaturesEXT>(pFeatures);
2063 if (memoryReportFeaturesEXT) {
2064 memoryReportFeaturesEXT->deviceMemoryReport = VK_TRUE;
2065 }
2066 }
2067}
2068
// KHR-suffixed alias; behavior is identical to on_vkGetPhysicalDeviceFeatures2.
void ResourceTracker::on_vkGetPhysicalDeviceFeatures2KHR(void* context,
                                                         VkPhysicalDevice physicalDevice,
                                                         VkPhysicalDeviceFeatures2* pFeatures) {
    on_vkGetPhysicalDeviceFeatures2(context, physicalDevice, pFeatures);
}
2074
// Post-call fixups for vkGetPhysicalDeviceProperties2: applies the core
// properties fixups (device type override) and forces the
// device-memory-report feature on if its struct appears in the chain.
// NOTE(review): VkPhysicalDeviceDeviceMemoryReportFeaturesEXT is a *features*
// struct, so finding it on a properties2 pNext chain would be unusual —
// presumably defensive parity with the features2 path; confirm it ever fires.
void ResourceTracker::on_vkGetPhysicalDeviceProperties2(void* context,
                                                        VkPhysicalDevice physicalDevice,
                                                        VkPhysicalDeviceProperties2* pProperties) {
    if (pProperties) {
        VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* memoryReportFeaturesEXT =
            vk_find_struct<VkPhysicalDeviceDeviceMemoryReportFeaturesEXT>(pProperties);
        if (memoryReportFeaturesEXT) {
            memoryReportFeaturesEXT->deviceMemoryReport = VK_TRUE;
        }
        on_vkGetPhysicalDeviceProperties(context, physicalDevice, &pProperties->properties);
    }
}
2087
// KHR-suffixed alias; behavior is identical to on_vkGetPhysicalDeviceProperties2.
void ResourceTracker::on_vkGetPhysicalDeviceProperties2KHR(
    void* context, VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties) {
    on_vkGetPhysicalDeviceProperties2(context, physicalDevice, pProperties);
}
2092
// Implements vkGetPhysicalDeviceMemoryProperties using the tracker's view of
// the host memory properties (VK_NULL_HANDLE device: no per-device lookup).
void ResourceTracker::on_vkGetPhysicalDeviceMemoryProperties(
    void* context, VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* out) {
    // gfxstream decides which physical device to expose to the guest on startup.
    // Otherwise, we would need a physical device to properties mapping.
    *out = getPhysicalDeviceMemoryProperties(context, VK_NULL_HANDLE, physicalDevice);
}
2099
// "2" variant: fills only the core memoryProperties member; any pNext chain
// on |out| is left untouched.
void ResourceTracker::on_vkGetPhysicalDeviceMemoryProperties2(
    void*, VkPhysicalDevice physdev, VkPhysicalDeviceMemoryProperties2* out) {
    on_vkGetPhysicalDeviceMemoryProperties(nullptr, physdev, &out->memoryProperties);
}
2104
2105void ResourceTracker::on_vkGetDeviceQueue(void*, VkDevice device, uint32_t, uint32_t,
2106 VkQueue* pQueue) {
2107 AutoLock<RecursiveLock> lock(mLock);
2108 info_VkQueue[*pQueue].device = device;
2109}
2110
2111void ResourceTracker::on_vkGetDeviceQueue2(void*, VkDevice device, const VkDeviceQueueInfo2*,
2112 VkQueue* pQueue) {
2113 AutoLock<RecursiveLock> lock(mLock);
2114 info_VkQueue[*pQueue].device = device;
2115}
2116
2117VkResult ResourceTracker::on_vkCreateInstance(void* context, VkResult input_result,
2118 const VkInstanceCreateInfo* createInfo,
2119 const VkAllocationCallbacks*, VkInstance* pInstance) {
2120 if (input_result != VK_SUCCESS) return input_result;
2121
2122 VkEncoder* enc = (VkEncoder*)context;
2123
2124 uint32_t apiVersion;
2125 VkResult enumInstanceVersionRes =
2126 enc->vkEnumerateInstanceVersion(&apiVersion, false /* no lock */);
2127
2128 setInstanceInfo(*pInstance, createInfo->enabledExtensionCount,
2129 createInfo->ppEnabledExtensionNames, apiVersion);
2130
2131 return input_result;
2132}
2133
// Post-create hook for vkCreateDevice (runs only when creation succeeded):
// snapshots the physical device's properties and memory properties from the
// host and registers the new VkDevice, its enabled extensions and the
// create-info pNext chain with the tracker. Returns |input_result|.
VkResult ResourceTracker::on_vkCreateDevice(void* context, VkResult input_result,
                                            VkPhysicalDevice physicalDevice,
                                            const VkDeviceCreateInfo* pCreateInfo,
                                            const VkAllocationCallbacks*, VkDevice* pDevice) {
    if (input_result != VK_SUCCESS) return input_result;

    VkEncoder* enc = (VkEncoder*)context;

    VkPhysicalDeviceProperties props;
    VkPhysicalDeviceMemoryProperties memProps;
    enc->vkGetPhysicalDeviceProperties(physicalDevice, &props, false /* no lock */);
    enc->vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProps, false /* no lock */);

    setDeviceInfo(*pDevice, physicalDevice, props, memProps, pCreateInfo->enabledExtensionCount,
                  pCreateInfo->ppEnabledExtensionNames, pCreateInfo->pNext);

    return input_result;
}
2152
2153void ResourceTracker::on_vkDestroyDevice_pre(void* context, VkDevice device,
2154 const VkAllocationCallbacks*) {
2155 (void)context;
2156 AutoLock<RecursiveLock> lock(mLock);
2157
2158 auto it = info_VkDevice.find(device);
2159 if (it == info_VkDevice.end()) return;
2160
2161 for (auto itr = info_VkDeviceMemory.cbegin(); itr != info_VkDeviceMemory.cend();) {
2162 auto& memInfo = itr->second;
2163 if (memInfo.device == device) {
2164 itr = info_VkDeviceMemory.erase(itr);
2165 } else {
2166 itr++;
2167 }
2168 }
2169}
2170
Gurchetan Singhc4444b82023-09-19 08:06:20 -07002171#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
// Restricts a memoryTypeBits mask to exactly the single given memory index.
void updateMemoryTypeBits(uint32_t* memoryTypeBits, uint32_t memoryIndex) {
    const uint32_t mask = 1u << memoryIndex;
    *memoryTypeBits = mask;
}
2175#endif
2176
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07002177#ifdef VK_USE_PLATFORM_ANDROID_KHR
2178
// Implements vkGetAndroidHardwareBufferPropertiesANDROID: restricts the
// reported memory types to the host's color-buffer memory index and fills the
// remaining properties via the gralloc helper.
VkResult ResourceTracker::on_vkGetAndroidHardwareBufferPropertiesANDROID(
    void* context, VkResult, VkDevice device, const AHardwareBuffer* buffer,
    VkAndroidHardwareBufferPropertiesANDROID* pProperties) {
    auto grallocHelper =
        ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper();

    // Delete once goldfish Linux drivers are gone
    if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
        // Capset did not provide the index (old goldfish host); probe for it.
        mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
    }

    updateMemoryTypeBits(&pProperties->memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);

    return getAndroidHardwareBufferPropertiesANDROID(grallocHelper, buffer, pProperties);
}
2194
// Implements vkGetMemoryAndroidHardwareBufferANDROID: returns the
// AHardwareBuffer backing a tracked VkDeviceMemory. Fails with
// VK_ERROR_INITIALIZATION_FAILED when the inputs are null or the device /
// memory is unknown to the tracker.
VkResult ResourceTracker::on_vkGetMemoryAndroidHardwareBufferANDROID(
    void*, VkResult, VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo,
    struct AHardwareBuffer** pBuffer) {
    if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
    if (!pInfo->memory) return VK_ERROR_INITIALIZATION_FAILED;

    AutoLock<RecursiveLock> lock(mLock);

    auto deviceIt = info_VkDevice.find(device);

    if (deviceIt == info_VkDevice.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto memoryIt = info_VkDeviceMemory.find(pInfo->memory);

    if (memoryIt == info_VkDeviceMemory.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto& info = memoryIt->second;

    auto* gralloc = ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper();
    // Refreshes/acquires the AHB reference stored in the tracker entry.
    VkResult queryRes = getMemoryAndroidHardwareBufferANDROID(gralloc, &info.ahw);

    if (queryRes != VK_SUCCESS) return queryRes;

    *pBuffer = info.ahw;

    return queryRes;
}
2226#endif
2227
2228#ifdef VK_USE_PLATFORM_FUCHSIA
2229VkResult ResourceTracker::on_vkGetMemoryZirconHandleFUCHSIA(
2230 void*, VkResult, VkDevice device, const VkMemoryGetZirconHandleInfoFUCHSIA* pInfo,
2231 uint32_t* pHandle) {
2232 if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
2233 if (!pInfo->memory) return VK_ERROR_INITIALIZATION_FAILED;
2234
2235 AutoLock<RecursiveLock> lock(mLock);
2236
2237 auto deviceIt = info_VkDevice.find(device);
2238
2239 if (deviceIt == info_VkDevice.end()) {
2240 return VK_ERROR_INITIALIZATION_FAILED;
2241 }
2242
2243 auto memoryIt = info_VkDeviceMemory.find(pInfo->memory);
2244
2245 if (memoryIt == info_VkDeviceMemory.end()) {
2246 return VK_ERROR_INITIALIZATION_FAILED;
2247 }
2248
2249 auto& info = memoryIt->second;
2250
2251 if (info.vmoHandle == ZX_HANDLE_INVALID) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07002252 mesa_loge("%s: memory cannot be exported", __func__);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07002253 return VK_ERROR_INITIALIZATION_FAILED;
2254 }
2255
2256 *pHandle = ZX_HANDLE_INVALID;
2257 zx_handle_duplicate(info.vmoHandle, ZX_RIGHT_SAME_RIGHTS, pHandle);
2258 return VK_SUCCESS;
2259}
2260
// Reports which of the device's memory types can back an imported Zircon VMO.
// The VMO is looked up via the goldfish control device; its memory property
// (device-local vs host-visible) determines the memoryTypeBits reported back.
VkResult ResourceTracker::on_vkGetMemoryZirconHandlePropertiesFUCHSIA(
    void*, VkResult, VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType,
    uint32_t handle, VkMemoryZirconHandlePropertiesFUCHSIA* pProperties) {
    using fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal;
    using fuchsia_hardware_goldfish::wire::kMemoryPropertyHostVisible;

    // Only VMO-typed external handles are supported.
    if (handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Verify the incoming handle really is a VMO before using it.
    zx_info_handle_basic_t handleInfo;
    zx_status_t status = zx::unowned_vmo(handle)->get_info(ZX_INFO_HANDLE_BASIC, &handleInfo,
                                                           sizeof(handleInfo), nullptr, nullptr);
    if (status != ZX_OK || handleInfo.type != ZX_OBJ_TYPE_VMO) {
        return VK_ERROR_INVALID_EXTERNAL_HANDLE;
    }

    AutoLock<RecursiveLock> lock(mLock);

    auto deviceIt = info_VkDevice.find(device);

    if (deviceIt == info_VkDevice.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto& info = deviceIt->second;

    // GetBufferHandleInfo consumes the handle, so query with a duplicate.
    zx::vmo vmo_dup;
    status = zx::unowned_vmo(handle)->duplicate(ZX_RIGHT_SAME_RIGHTS, &vmo_dup);
    if (status != ZX_OK) {
        mesa_loge("zx_handle_duplicate() error: %d", status);
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    uint32_t memoryProperty = 0u;

    auto result = mControlDevice->GetBufferHandleInfo(std::move(vmo_dup));
    if (!result.ok()) {
        // Transport-level failure talking to the control device.
        mesa_loge("mControlDevice->GetBufferHandleInfo fatal error: epitaph: %d", result.status());
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    if (result.value().is_ok()) {
        memoryProperty = result.value().value()->info.memory_property();
    } else if (result.value().error_value() == ZX_ERR_NOT_FOUND) {
        // If a VMO is allocated while ColorBuffer/Buffer is not created,
        // it must be a device-local buffer, since for host-visible buffers,
        // ColorBuffer/Buffer is created at sysmem allocation time.
        memoryProperty = kMemoryPropertyDeviceLocal;
    } else {
        // Importing read-only host memory into the Vulkan driver should not
        // work, but it is not an error to try to do so. Returning a
        // VkMemoryZirconHandlePropertiesFUCHSIA with no available
        // memoryType bits should be enough for clients. See fxbug.dev/42098398
        // for other issues this this flow.
        mesa_logw("GetBufferHandleInfo failed: %d", result.value().error_value());
        pProperties->memoryTypeBits = 0;
        return VK_SUCCESS;
    }

    // Translate the goldfish memory property into a mask of compatible
    // Vulkan memory types on this device.
    pProperties->memoryTypeBits = 0;
    for (uint32_t i = 0; i < info.memProps.memoryTypeCount; ++i) {
        if (((memoryProperty & kMemoryPropertyDeviceLocal) &&
             (info.memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) ||
            ((memoryProperty & kMemoryPropertyHostVisible) &&
             (info.memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))) {
            pProperties->memoryTypeBits |= 1ull << i;
        }
    }
    return VK_SUCCESS;
}
2331
2332zx_koid_t getEventKoid(zx_handle_t eventHandle) {
2333 if (eventHandle == ZX_HANDLE_INVALID) {
2334 return ZX_KOID_INVALID;
2335 }
2336
2337 zx_info_handle_basic_t info;
2338 zx_status_t status = zx_object_get_info(eventHandle, ZX_INFO_HANDLE_BASIC, &info, sizeof(info),
2339 nullptr, nullptr);
2340 if (status != ZX_OK) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07002341 mesa_loge("Cannot get object info of handle %u: %d", eventHandle, status);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07002342 return ZX_KOID_INVALID;
2343 }
2344 return info.koid;
2345}
2346
2347VkResult ResourceTracker::on_vkImportSemaphoreZirconHandleFUCHSIA(
2348 void*, VkResult, VkDevice device, const VkImportSemaphoreZirconHandleInfoFUCHSIA* pInfo) {
2349 if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
2350 if (!pInfo->semaphore) return VK_ERROR_INITIALIZATION_FAILED;
2351
2352 AutoLock<RecursiveLock> lock(mLock);
2353
2354 auto deviceIt = info_VkDevice.find(device);
2355
2356 if (deviceIt == info_VkDevice.end()) {
2357 return VK_ERROR_INITIALIZATION_FAILED;
2358 }
2359
2360 auto semaphoreIt = info_VkSemaphore.find(pInfo->semaphore);
2361
2362 if (semaphoreIt == info_VkSemaphore.end()) {
2363 return VK_ERROR_INITIALIZATION_FAILED;
2364 }
2365
2366 auto& info = semaphoreIt->second;
2367
2368 if (info.eventHandle != ZX_HANDLE_INVALID) {
2369 zx_handle_close(info.eventHandle);
2370 }
2371#if VK_HEADER_VERSION < 174
2372 info.eventHandle = pInfo->handle;
2373#else // VK_HEADER_VERSION >= 174
2374 info.eventHandle = pInfo->zirconHandle;
2375#endif // VK_HEADER_VERSION < 174
2376 if (info.eventHandle != ZX_HANDLE_INVALID) {
2377 info.eventKoid = getEventKoid(info.eventHandle);
2378 }
2379
2380 return VK_SUCCESS;
2381}
2382
2383VkResult ResourceTracker::on_vkGetSemaphoreZirconHandleFUCHSIA(
2384 void*, VkResult, VkDevice device, const VkSemaphoreGetZirconHandleInfoFUCHSIA* pInfo,
2385 uint32_t* pHandle) {
2386 if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
2387 if (!pInfo->semaphore) return VK_ERROR_INITIALIZATION_FAILED;
2388
2389 AutoLock<RecursiveLock> lock(mLock);
2390
2391 auto deviceIt = info_VkDevice.find(device);
2392
2393 if (deviceIt == info_VkDevice.end()) {
2394 return VK_ERROR_INITIALIZATION_FAILED;
2395 }
2396
2397 auto semaphoreIt = info_VkSemaphore.find(pInfo->semaphore);
2398
2399 if (semaphoreIt == info_VkSemaphore.end()) {
2400 return VK_ERROR_INITIALIZATION_FAILED;
2401 }
2402
2403 auto& info = semaphoreIt->second;
2404
2405 if (info.eventHandle == ZX_HANDLE_INVALID) {
2406 return VK_ERROR_INITIALIZATION_FAILED;
2407 }
2408
2409 *pHandle = ZX_HANDLE_INVALID;
2410 zx_handle_duplicate(info.eventHandle, ZX_RIGHT_SAME_RIGHTS, pHandle);
2411 return VK_SUCCESS;
2412}
2413
// Creates a VkBufferCollectionFUCHSIA backed by a sysmem BufferCollection
// channel. If the caller supplied a collection token it is adopted;
// otherwise a fresh shared collection is allocated from sysmem. The returned
// handle is a pointer to a heap-allocated FIDL client object.
VkResult ResourceTracker::on_vkCreateBufferCollectionFUCHSIA(
    void*, VkResult, VkDevice, const VkBufferCollectionCreateInfoFUCHSIA* pInfo,
    const VkAllocationCallbacks*, VkBufferCollectionFUCHSIA* pCollection) {
    fidl::ClientEnd<::fuchsia_sysmem::BufferCollectionToken> token_client;

    if (pInfo->collectionToken) {
        // Adopt the caller-provided token channel.
        token_client = fidl::ClientEnd<::fuchsia_sysmem::BufferCollectionToken>(
            zx::channel(pInfo->collectionToken));
    } else {
        // No token given: allocate a new shared collection token from sysmem.
        auto endpoints = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollectionToken>();
        if (!endpoints.is_ok()) {
            mesa_loge("zx_channel_create failed: %d", endpoints.status_value());
            return VK_ERROR_INITIALIZATION_FAILED;
        }

        auto result = mSysmemAllocator->AllocateSharedCollection(std::move(endpoints->server));
        if (!result.ok()) {
            mesa_loge("AllocateSharedCollection failed: %d", result.status());
            return VK_ERROR_INITIALIZATION_FAILED;
        }
        token_client = std::move(endpoints->client);
    }

    // Exchange the token for a BufferCollection channel.
    auto endpoints = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollection>();
    if (!endpoints.is_ok()) {
        mesa_loge("zx_channel_create failed: %d", endpoints.status_value());
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    auto [collection_client, collection_server] = std::move(endpoints.value());

    auto result = mSysmemAllocator->BindSharedCollection(std::move(token_client),
                                                         std::move(collection_server));
    if (!result.ok()) {
        mesa_loge("BindSharedCollection failed: %d", result.status());
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // The Vulkan handle is the pointer to the owning FIDL client; it is
    // freed in on_vkDestroyBufferCollectionFUCHSIA.
    auto* sysmem_collection =
        new fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>(std::move(collection_client));
    *pCollection = reinterpret_cast<VkBufferCollectionFUCHSIA>(sysmem_collection);

    register_VkBufferCollectionFUCHSIA(*pCollection);
    return VK_SUCCESS;
}
2458
2459void ResourceTracker::on_vkDestroyBufferCollectionFUCHSIA(void*, VkResult, VkDevice,
2460 VkBufferCollectionFUCHSIA collection,
2461 const VkAllocationCallbacks*) {
2462 auto sysmem_collection =
2463 reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
2464 if (sysmem_collection) {
2465 (*sysmem_collection)->Close();
2466 }
2467 delete sysmem_collection;
2468
2469 unregister_VkBufferCollectionFUCHSIA(collection);
2470}
2471
// Builds sysmem BufferCollectionConstraints from the Vulkan image
// constraints, probing the device for optimal- and linear-tiling support of
// each requested format, and sends them to sysmem via SetConstraints.
// Returns the constraints and the mapping from sysmem image-format index to
// the caller's pFormatConstraints index (createInfoIndex) on success.
SetBufferCollectionImageConstraintsResult ResourceTracker::setBufferCollectionImageConstraintsImpl(
    VkEncoder* enc, VkDevice device,
    fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
    const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
    const auto& collection = *pCollection;
    if (!pImageConstraintsInfo ||
        pImageConstraintsInfo->sType != VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA) {
        mesa_loge("%s: invalid pImageConstraintsInfo", __func__);
        return {VK_ERROR_INITIALIZATION_FAILED};
    }

    // A collection with no format constraints is a caller bug; fail hard.
    if (pImageConstraintsInfo->formatConstraintsCount == 0) {
        mesa_loge("%s: formatConstraintsCount must be greater than 0", __func__);
        abort();
    }

    // Start from the default constraints with the caller's buffer counts.
    fuchsia_sysmem::wire::BufferCollectionConstraints constraints =
        defaultBufferCollectionConstraints(
            /* min_size_bytes */ 0,
            pImageConstraintsInfo->bufferCollectionConstraints.minBufferCount,
            pImageConstraintsInfo->bufferCollectionConstraints.maxBufferCount,
            pImageConstraintsInfo->bufferCollectionConstraints.minBufferCountForCamping,
            pImageConstraintsInfo->bufferCollectionConstraints.minBufferCountForDedicatedSlack,
            pImageConstraintsInfo->bufferCollectionConstraints.minBufferCountForSharedSlack);

    std::vector<fuchsia_sysmem::wire::ImageFormatConstraints> format_constraints;

    VkPhysicalDevice physicalDevice;
    {
        AutoLock<RecursiveLock> lock(mLock);
        auto deviceIt = info_VkDevice.find(device);
        if (deviceIt == info_VkDevice.end()) {
            return {VK_ERROR_INITIALIZATION_FAILED};
        }
        physicalDevice = deviceIt->second.physdev;
    }

    // For each sysmem image-format entry added below, createInfoIndex
    // records which caller-supplied format constraint produced it.
    std::vector<uint32_t> createInfoIndex;

    bool hasOptimalTiling = false;
    for (uint32_t i = 0; i < pImageConstraintsInfo->formatConstraintsCount; i++) {
        const VkImageCreateInfo* createInfo =
            &pImageConstraintsInfo->pFormatConstraints[i].imageCreateInfo;
        const VkImageFormatConstraintsInfoFUCHSIA* formatConstraints =
            &pImageConstraintsInfo->pFormatConstraints[i];

        // add ImageFormatConstraints for *optimal* tiling
        VkResult optimalResult = VK_ERROR_FORMAT_NOT_SUPPORTED;
        if (createInfo->tiling == VK_IMAGE_TILING_OPTIMAL) {
            optimalResult = addImageBufferCollectionConstraintsFUCHSIA(
                enc, device, physicalDevice, formatConstraints, VK_IMAGE_TILING_OPTIMAL,
                &constraints);
            if (optimalResult == VK_SUCCESS) {
                createInfoIndex.push_back(i);
                hasOptimalTiling = true;
            }
        }

        // Add ImageFormatConstraints for *linear* tiling
        VkResult linearResult = addImageBufferCollectionConstraintsFUCHSIA(
            enc, device, physicalDevice, formatConstraints, VK_IMAGE_TILING_LINEAR, &constraints);
        if (linearResult == VK_SUCCESS) {
            createInfoIndex.push_back(i);
        }

        // Update usage and BufferMemoryConstraints
        if (linearResult == VK_SUCCESS || optimalResult == VK_SUCCESS) {
            constraints.usage.vulkan |= getBufferCollectionConstraintsVulkanImageUsage(createInfo);

            if (formatConstraints && formatConstraints->flags) {
                mesa_logw(
                    "%s: Non-zero flags (%08x) in image format "
                    "constraints; this is currently not supported, see "
                    "fxbug.dev/42147900.",
                    __func__, formatConstraints->flags);
            }
        }
    }

    // Set buffer memory constraints based on optimal/linear tiling support
    // and flags.
    VkImageConstraintsInfoFlagsFUCHSIA flags = pImageConstraintsInfo->flags;
    if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA)
        constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageRead;
    if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA)
        constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageReadOften;
    if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA)
        constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageWrite;
    if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA)
        constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageWriteOften;

    constraints.has_buffer_memory_constraints = true;
    auto& memory_constraints = constraints.buffer_memory_constraints;
    memory_constraints.cpu_domain_supported = true;
    memory_constraints.ram_domain_supported = true;
    // The inaccessible domain is only usable when no CPU access was
    // requested and at least one optimal-tiling format is supported.
    memory_constraints.inaccessible_domain_supported =
        hasOptimalTiling && !(flags & (VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA |
                                       VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA |
                                       VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA |
                                       VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA));

    if (memory_constraints.inaccessible_domain_supported) {
        memory_constraints.heap_permitted_count = 2;
        memory_constraints.heap_permitted[0] = fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
        memory_constraints.heap_permitted[1] = fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
    } else {
        memory_constraints.heap_permitted_count = 1;
        memory_constraints.heap_permitted[0] = fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
    }

    if (constraints.image_format_constraints_count == 0) {
        mesa_loge("%s: none of the specified formats is supported by device", __func__);
        return {VK_ERROR_FORMAT_NOT_SUPPORTED};
    }

    constexpr uint32_t kVulkanPriority = 5;
    const char kName[] = "GoldfishSysmemShared";
    collection->SetName(kVulkanPriority, fidl::StringView(kName));

    auto result = collection->SetConstraints(true, constraints);
    if (!result.ok()) {
        mesa_loge("setBufferCollectionConstraints: SetConstraints failed: %d", result.status());
        return {VK_ERROR_INITIALIZATION_FAILED};
    }

    return {VK_SUCCESS, constraints, std::move(createInfoIndex)};
}
2599
2600VkResult ResourceTracker::setBufferCollectionImageConstraintsFUCHSIA(
2601 VkEncoder* enc, VkDevice device,
2602 fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
2603 const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
2604 const auto& collection = *pCollection;
2605
2606 auto setConstraintsResult =
2607 setBufferCollectionImageConstraintsImpl(enc, device, pCollection, pImageConstraintsInfo);
2608 if (setConstraintsResult.result != VK_SUCCESS) {
2609 return setConstraintsResult.result;
2610 }
2611
2612 // copy constraints to info_VkBufferCollectionFUCHSIA if
2613 // |collection| is a valid VkBufferCollectionFUCHSIA handle.
2614 AutoLock<RecursiveLock> lock(mLock);
2615 VkBufferCollectionFUCHSIA buffer_collection =
2616 reinterpret_cast<VkBufferCollectionFUCHSIA>(pCollection);
2617 if (info_VkBufferCollectionFUCHSIA.find(buffer_collection) !=
2618 info_VkBufferCollectionFUCHSIA.end()) {
2619 info_VkBufferCollectionFUCHSIA[buffer_collection].constraints =
2620 gfxstream::guest::makeOptional(std::move(setConstraintsResult.constraints));
2621 info_VkBufferCollectionFUCHSIA[buffer_collection].createInfoIndex =
2622 std::move(setConstraintsResult.createInfoIndex);
2623 }
2624
2625 return VK_SUCCESS;
2626}
2627
2628VkResult ResourceTracker::setBufferCollectionBufferConstraintsFUCHSIA(
2629 fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
2630 const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
2631 auto setConstraintsResult =
2632 setBufferCollectionBufferConstraintsImpl(pCollection, pBufferConstraintsInfo);
2633 if (setConstraintsResult.result != VK_SUCCESS) {
2634 return setConstraintsResult.result;
2635 }
2636
2637 // copy constraints to info_VkBufferCollectionFUCHSIA if
2638 // |collection| is a valid VkBufferCollectionFUCHSIA handle.
2639 AutoLock<RecursiveLock> lock(mLock);
2640 VkBufferCollectionFUCHSIA buffer_collection =
2641 reinterpret_cast<VkBufferCollectionFUCHSIA>(pCollection);
2642 if (info_VkBufferCollectionFUCHSIA.find(buffer_collection) !=
2643 info_VkBufferCollectionFUCHSIA.end()) {
2644 info_VkBufferCollectionFUCHSIA[buffer_collection].constraints =
2645 gfxstream::guest::makeOptional(setConstraintsResult.constraints);
2646 }
2647
2648 return VK_SUCCESS;
2649}
2650
2651VkResult ResourceTracker::on_vkSetBufferCollectionImageConstraintsFUCHSIA(
2652 void* context, VkResult, VkDevice device, VkBufferCollectionFUCHSIA collection,
2653 const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
2654 VkEncoder* enc = (VkEncoder*)context;
2655 auto sysmem_collection =
2656 reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
2657 return setBufferCollectionImageConstraintsFUCHSIA(enc, device, sysmem_collection,
2658 pImageConstraintsInfo);
2659}
2660
2661VkResult ResourceTracker::on_vkSetBufferCollectionBufferConstraintsFUCHSIA(
2662 void*, VkResult, VkDevice, VkBufferCollectionFUCHSIA collection,
2663 const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
2664 auto sysmem_collection =
2665 reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
2666 return setBufferCollectionBufferConstraintsFUCHSIA(sysmem_collection, pBufferConstraintsInfo);
2667}
2668
C Stout5a3a4222023-11-14 16:31:56 -08002669VkResult ResourceTracker::getBufferCollectionImageCreateInfoIndexLocked(
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07002670 VkBufferCollectionFUCHSIA collection, fuchsia_sysmem::wire::BufferCollectionInfo2& info,
2671 uint32_t* outCreateInfoIndex) {
2672 if (!info_VkBufferCollectionFUCHSIA[collection].constraints.hasValue()) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07002673 mesa_loge("%s: constraints not set", __func__);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07002674 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2675 }
2676
2677 if (!info.settings.has_image_format_constraints) {
2678 // no image format constraints, skip getting createInfoIndex.
2679 return VK_SUCCESS;
2680 }
2681
2682 const auto& constraints = *info_VkBufferCollectionFUCHSIA[collection].constraints;
2683 const auto& createInfoIndices = info_VkBufferCollectionFUCHSIA[collection].createInfoIndex;
2684 const auto& out = info.settings.image_format_constraints;
2685 bool foundCreateInfo = false;
2686
2687 for (size_t imageFormatIndex = 0; imageFormatIndex < constraints.image_format_constraints_count;
2688 imageFormatIndex++) {
2689 const auto& in = constraints.image_format_constraints[imageFormatIndex];
2690 // These checks are sorted in order of how often they're expected to
2691 // mismatch, from most likely to least likely. They aren't always
2692 // equality comparisons, since sysmem may change some values in
2693 // compatible ways on behalf of the other participants.
2694 if ((out.pixel_format.type != in.pixel_format.type) ||
2695 (out.pixel_format.has_format_modifier != in.pixel_format.has_format_modifier) ||
2696 (out.pixel_format.format_modifier.value != in.pixel_format.format_modifier.value) ||
2697 (out.min_bytes_per_row < in.min_bytes_per_row) ||
2698 (out.required_max_coded_width < in.required_max_coded_width) ||
2699 (out.required_max_coded_height < in.required_max_coded_height) ||
2700 (in.bytes_per_row_divisor != 0 &&
2701 out.bytes_per_row_divisor % in.bytes_per_row_divisor != 0)) {
2702 continue;
2703 }
2704 // Check if the out colorspaces are a subset of the in color spaces.
2705 bool all_color_spaces_found = true;
2706 for (uint32_t j = 0; j < out.color_spaces_count; j++) {
2707 bool found_matching_color_space = false;
2708 for (uint32_t k = 0; k < in.color_spaces_count; k++) {
2709 if (out.color_space[j].type == in.color_space[k].type) {
2710 found_matching_color_space = true;
2711 break;
2712 }
2713 }
2714 if (!found_matching_color_space) {
2715 all_color_spaces_found = false;
2716 break;
2717 }
2718 }
2719 if (!all_color_spaces_found) {
2720 continue;
2721 }
2722
2723 // Choose the first valid format for now.
2724 *outCreateInfoIndex = createInfoIndices[imageFormatIndex];
2725 return VK_SUCCESS;
2726 }
2727
Gurchetan Singh42361f72024-05-16 17:37:11 -07002728 mesa_loge("%s: cannot find a valid image format in constraints", __func__);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07002729 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2730}
2731
// Waits for sysmem to allocate the collection's buffers and fills in
// VkBufferCollectionPropertiesFUCHSIA: memoryTypeBits, bufferCount, and —
// when the collection has image format constraints — pixel format, color
// space, createInfoIndex, format features, and YCbCr defaults. Properties
// are also cached on the tracked collection entry.
VkResult ResourceTracker::on_vkGetBufferCollectionPropertiesFUCHSIA(
    void* context, VkResult, VkDevice device, VkBufferCollectionFUCHSIA collection,
    VkBufferCollectionPropertiesFUCHSIA* pProperties) {
    VkEncoder* enc = (VkEncoder*)context;
    const auto& sysmem_collection =
        *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);

    // Block until sysmem has finished allocation (or failed).
    auto result = sysmem_collection->WaitForBuffersAllocated();
    if (!result.ok() || result->status != ZX_OK) {
        mesa_loge("Failed wait for allocation: %d %d", result.status(),
                  GET_STATUS_SAFE(result, status));
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    fuchsia_sysmem::wire::BufferCollectionInfo2 info = std::move(result->buffer_collection_info);

    // Only the two goldfish heaps are understood by this driver.
    bool is_host_visible =
        info.settings.buffer_settings.heap == fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
    bool is_device_local =
        info.settings.buffer_settings.heap == fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
    if (!is_host_visible && !is_device_local) {
        mesa_loge("buffer collection uses a non-goldfish heap (type 0x%lu)",
                  static_cast<uint64_t>(info.settings.buffer_settings.heap));
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // memoryTypeBits
    // ====================================================================
    {
        AutoLock<RecursiveLock> lock(mLock);
        auto deviceIt = info_VkDevice.find(device);
        if (deviceIt == info_VkDevice.end()) {
            return VK_ERROR_INITIALIZATION_FAILED;
        }
        auto& deviceInfo = deviceIt->second;

        // Device local memory type supported.
        pProperties->memoryTypeBits = 0;
        for (uint32_t i = 0; i < deviceInfo.memProps.memoryTypeCount; ++i) {
            if ((is_device_local && (deviceInfo.memProps.memoryTypes[i].propertyFlags &
                                     VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) ||
                (is_host_visible && (deviceInfo.memProps.memoryTypes[i].propertyFlags &
                                     VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))) {
                pProperties->memoryTypeBits |= 1ull << i;
            }
        }
    }

    // bufferCount
    // ====================================================================
    pProperties->bufferCount = info.buffer_count;

    // Caches a shallow copy of *pProperties on the tracked collection entry.
    auto storeProperties = [this, collection, pProperties]() -> VkResult {
        // store properties to storage
        AutoLock<RecursiveLock> lock(mLock);
        if (info_VkBufferCollectionFUCHSIA.find(collection) ==
            info_VkBufferCollectionFUCHSIA.end()) {
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }

        info_VkBufferCollectionFUCHSIA[collection].properties =
            gfxstream::guest::makeOptional(*pProperties);

        // We only do a shallow copy so we should remove all pNext pointers.
        info_VkBufferCollectionFUCHSIA[collection].properties->pNext = nullptr;
        info_VkBufferCollectionFUCHSIA[collection].properties->sysmemColorSpaceIndex.pNext =
            nullptr;
        return VK_SUCCESS;
    };

    // The fields below only apply to buffer collections with image formats.
    if (!info.settings.has_image_format_constraints) {
        mesa_logd("%s: buffer collection doesn't have image format constraints", __func__);
        return storeProperties();
    }

    // sysmemFormat
    // ====================================================================

    pProperties->sysmemPixelFormat =
        static_cast<uint64_t>(info.settings.image_format_constraints.pixel_format.type);

    // colorSpace
    // ====================================================================
    if (info.settings.image_format_constraints.color_spaces_count == 0) {
        mesa_loge(
            "%s: color space missing from allocated buffer collection "
            "constraints",
            __func__);
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }
    // Only report first colorspace for now.
    pProperties->sysmemColorSpaceIndex.colorSpace =
        static_cast<uint32_t>(info.settings.image_format_constraints.color_space[0].type);

    // createInfoIndex
    // ====================================================================
    {
        AutoLock<RecursiveLock> lock(mLock);
        auto getIndexResult = getBufferCollectionImageCreateInfoIndexLocked(
            collection, info, &pProperties->createInfoIndex);
        if (getIndexResult != VK_SUCCESS) {
            return getIndexResult;
        }
    }

    // formatFeatures
    // ====================================================================
    VkPhysicalDevice physicalDevice;
    {
        AutoLock<RecursiveLock> lock(mLock);
        auto deviceIt = info_VkDevice.find(device);
        if (deviceIt == info_VkDevice.end()) {
            return VK_ERROR_INITIALIZATION_FAILED;
        }
        physicalDevice = deviceIt->second.physdev;
    }

    // Device-local heaps report optimal-tiling features; host-visible heaps
    // report linear-tiling features.
    VkFormat vkFormat =
        sysmemPixelFormatTypeToVk(info.settings.image_format_constraints.pixel_format.type);
    VkFormatProperties formatProperties;
    enc->vkGetPhysicalDeviceFormatProperties(physicalDevice, vkFormat, &formatProperties,
                                             true /* do lock */);
    if (is_device_local) {
        pProperties->formatFeatures = formatProperties.optimalTilingFeatures;
    }
    if (is_host_visible) {
        pProperties->formatFeatures = formatProperties.linearTilingFeatures;
    }

    // YCbCr properties
    // ====================================================================
    // TODO(59804): Implement this correctly when we support YUV pixel
    // formats in goldfish ICD.
    pProperties->samplerYcbcrConversionComponents.r = VK_COMPONENT_SWIZZLE_IDENTITY;
    pProperties->samplerYcbcrConversionComponents.g = VK_COMPONENT_SWIZZLE_IDENTITY;
    pProperties->samplerYcbcrConversionComponents.b = VK_COMPONENT_SWIZZLE_IDENTITY;
    pProperties->samplerYcbcrConversionComponents.a = VK_COMPONENT_SWIZZLE_IDENTITY;
    pProperties->suggestedYcbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY;
    pProperties->suggestedYcbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL;
    pProperties->suggestedXChromaOffset = VK_CHROMA_LOCATION_MIDPOINT;
    pProperties->suggestedYChromaOffset = VK_CHROMA_LOCATION_MIDPOINT;

    return storeProperties();
}
2876#endif
2877
Gurchetan Singhc4444b82023-09-19 08:06:20 -07002878static uint32_t getVirglFormat(VkFormat vkFormat) {
2879 uint32_t virglFormat = 0;
2880
2881 switch (vkFormat) {
2882 case VK_FORMAT_R8G8B8A8_SINT:
2883 case VK_FORMAT_R8G8B8A8_UNORM:
2884 case VK_FORMAT_R8G8B8A8_SRGB:
2885 case VK_FORMAT_R8G8B8A8_SNORM:
2886 case VK_FORMAT_R8G8B8A8_SSCALED:
2887 case VK_FORMAT_R8G8B8A8_USCALED:
2888 virglFormat = VIRGL_FORMAT_R8G8B8A8_UNORM;
2889 break;
2890 case VK_FORMAT_B8G8R8A8_SINT:
2891 case VK_FORMAT_B8G8R8A8_UNORM:
2892 case VK_FORMAT_B8G8R8A8_SRGB:
2893 case VK_FORMAT_B8G8R8A8_SNORM:
2894 case VK_FORMAT_B8G8R8A8_SSCALED:
2895 case VK_FORMAT_B8G8R8A8_USCALED:
2896 virglFormat = VIRGL_FORMAT_B8G8R8A8_UNORM;
2897 break;
2898 default:
2899 break;
2900 }
2901
2902 return virglFormat;
2903}
2904
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07002905CoherentMemoryPtr ResourceTracker::createCoherentMemory(
2906 VkDevice device, VkDeviceMemory mem, const VkMemoryAllocateInfo& hostAllocationInfo,
2907 VkEncoder* enc, VkResult& res) {
2908 CoherentMemoryPtr coherentMemory = nullptr;
2909
2910#if defined(__ANDROID__)
2911 if (mFeatureInfo->hasDirectMem) {
2912 uint64_t gpuAddr = 0;
2913 GoldfishAddressSpaceBlockPtr block = nullptr;
2914 res = enc->vkMapMemoryIntoAddressSpaceGOOGLE(device, mem, &gpuAddr, true);
2915 if (res != VK_SUCCESS) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07002916 mesa_loge(
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07002917 "Failed to create coherent memory: vkMapMemoryIntoAddressSpaceGOOGLE "
2918 "returned:%d.",
2919 res);
2920 return coherentMemory;
2921 }
2922 {
2923 AutoLock<RecursiveLock> lock(mLock);
2924 auto it = info_VkDeviceMemory.find(mem);
2925 if (it == info_VkDeviceMemory.end()) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07002926 mesa_loge("Failed to create coherent memory: failed to find device memory.");
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07002927 res = VK_ERROR_OUT_OF_HOST_MEMORY;
Hailin zhangad3b3de2022-10-03 22:46:10 +00002928 return coherentMemory;
2929 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07002930 auto& info = it->second;
2931 block = info.goldfishBlock;
2932 info.goldfishBlock = nullptr;
Lingfeng Yanga963ea02019-03-21 21:27:04 -07002933
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07002934 coherentMemory = std::make_shared<CoherentMemory>(
2935 block, gpuAddr, hostAllocationInfo.allocationSize, device, mem);
2936 }
2937 } else
2938#endif // defined(__ANDROID__)
Jason Macnak650c0c02023-07-20 16:06:53 -07002939 if (mFeatureInfo->hasVirtioGpuNext) {
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07002940 struct VirtGpuCreateBlob createBlob = {0};
Hailin zhangad3b3de2022-10-03 22:46:10 +00002941 uint64_t hvaSizeId[3];
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07002942 res = enc->vkGetMemoryHostAddressInfoGOOGLE(device, mem, &hvaSizeId[0], &hvaSizeId[1],
2943 &hvaSizeId[2], true /* do lock */);
Jason Macnakdf2f0742023-08-25 16:35:33 +00002944 if (res != VK_SUCCESS) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07002945 mesa_loge(
Jason Macnakdf2f0742023-08-25 16:35:33 +00002946 "Failed to create coherent memory: vkMapMemoryIntoAddressSpaceGOOGLE "
2947 "returned:%d.",
2948 res);
Hailin zhangad3b3de2022-10-03 22:46:10 +00002949 return coherentMemory;
2950 }
2951 {
2952 AutoLock<RecursiveLock> lock(mLock);
Jason Macnak313357f2023-07-19 14:45:43 -07002953 VirtGpuDevice* instance = VirtGpuDevice::getInstance((enum VirtGpuCapset)3);
Hailin zhangad3b3de2022-10-03 22:46:10 +00002954 createBlob.blobMem = kBlobMemHost3d;
2955 createBlob.flags = kBlobFlagMappable;
2956 createBlob.blobId = hvaSizeId[2];
2957 createBlob.size = hostAllocationInfo.allocationSize;
2958
Jason Macnak313357f2023-07-19 14:45:43 -07002959 auto blob = instance->createBlob(createBlob);
Hailin zhangad3b3de2022-10-03 22:46:10 +00002960 if (!blob) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07002961 mesa_loge("Failed to create coherent memory: failed to create blob.");
Hailin zhangad3b3de2022-10-03 22:46:10 +00002962 res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
2963 return coherentMemory;
2964 }
2965
Jason Macnak6d3d7b22024-04-01 16:48:53 -07002966 VirtGpuResourceMappingPtr mapping = blob->createMapping();
Hailin zhangad3b3de2022-10-03 22:46:10 +00002967 if (!mapping) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07002968 mesa_loge("Failed to create coherent memory: failed to create blob mapping.");
Hailin zhangad3b3de2022-10-03 22:46:10 +00002969 res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
2970 return coherentMemory;
2971 }
2972
2973 coherentMemory =
2974 std::make_shared<CoherentMemory>(mapping, createBlob.size, device, mem);
2975 }
2976 } else {
Gurchetan Singh42361f72024-05-16 17:37:11 -07002977 mesa_loge("FATAL: Unsupported virtual memory feature");
Hailin zhangad3b3de2022-10-03 22:46:10 +00002978 abort();
2979 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07002980 return coherentMemory;
2981}
2982
2983VkResult ResourceTracker::allocateCoherentMemory(VkDevice device,
2984 const VkMemoryAllocateInfo* pAllocateInfo,
2985 VkEncoder* enc, VkDeviceMemory* pMemory) {
2986 uint64_t blobId = 0;
2987 uint64_t offset = 0;
2988 uint8_t* ptr = nullptr;
2989 VkMemoryAllocateFlagsInfo allocFlagsInfo;
2990 VkMemoryOpaqueCaptureAddressAllocateInfo opaqueCaptureAddressAllocInfo;
2991 VkCreateBlobGOOGLE createBlobInfo;
Jason Macnak6d3d7b22024-04-01 16:48:53 -07002992 VirtGpuResourcePtr guestBlob = nullptr;
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07002993
2994 memset(&createBlobInfo, 0, sizeof(struct VkCreateBlobGOOGLE));
2995 createBlobInfo.sType = VK_STRUCTURE_TYPE_CREATE_BLOB_GOOGLE;
2996
2997 const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr =
2998 vk_find_struct<VkMemoryAllocateFlagsInfo>(pAllocateInfo);
2999 const VkMemoryOpaqueCaptureAddressAllocateInfo* opaqueCaptureAddressAllocInfoPtr =
3000 vk_find_struct<VkMemoryOpaqueCaptureAddressAllocateInfo>(pAllocateInfo);
3001
3002 bool deviceAddressMemoryAllocation =
3003 allocFlagsInfoPtr &&
3004 ((allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT) ||
3005 (allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT));
3006
3007 bool dedicated = deviceAddressMemoryAllocation;
3008
3009 if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle])
3010 dedicated = true;
3011
3012 VkMemoryAllocateInfo hostAllocationInfo = vk_make_orphan_copy(*pAllocateInfo);
3013 vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&hostAllocationInfo);
3014
3015 if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle]) {
3016 hostAllocationInfo.allocationSize =
3017 ALIGN(pAllocateInfo->allocationSize, mCaps.vulkanCapset.blobAlignment);
3018 } else if (dedicated) {
3019 // Over-aligning to kLargestSize to some Windows drivers (b:152769369). Can likely
3020 // have host report the desired alignment.
3021 hostAllocationInfo.allocationSize = ALIGN(pAllocateInfo->allocationSize, kLargestPageSize);
3022 } else {
3023 VkDeviceSize roundedUpAllocSize = ALIGN(pAllocateInfo->allocationSize, kMegaByte);
3024 hostAllocationInfo.allocationSize = std::max(roundedUpAllocSize, kDefaultHostMemBlockSize);
Hailin zhangad3b3de2022-10-03 22:46:10 +00003025 }
3026
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003027 // Support device address capture/replay allocations
3028 if (deviceAddressMemoryAllocation) {
Lingfeng Yangbf019a62021-08-26 15:43:39 -07003029 if (allocFlagsInfoPtr) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003030 mesa_logi("%s: has alloc flags\n", __func__);
Lingfeng Yangbf019a62021-08-26 15:43:39 -07003031 allocFlagsInfo = *allocFlagsInfoPtr;
3032 vk_append_struct(&structChainIter, &allocFlagsInfo);
3033 }
3034
3035 if (opaqueCaptureAddressAllocInfoPtr) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003036 mesa_logi("%s: has opaque capture address\n", __func__);
Lingfeng Yangbf019a62021-08-26 15:43:39 -07003037 opaqueCaptureAddressAllocInfo = *opaqueCaptureAddressAllocInfoPtr;
3038 vk_append_struct(&structChainIter, &opaqueCaptureAddressAllocInfo);
3039 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003040 }
Lingfeng Yangbf019a62021-08-26 15:43:39 -07003041
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003042 if (mCaps.params[kParamCreateGuestHandle]) {
3043 struct VirtGpuCreateBlob createBlob = {0};
3044 struct VirtGpuExecBuffer exec = {};
3045 VirtGpuDevice* instance = VirtGpuDevice::getInstance();
3046 struct gfxstreamPlaceholderCommandVk placeholderCmd = {};
Lingfeng Yanga4b97ac2019-02-19 15:50:26 -08003047
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003048 createBlobInfo.blobId = ++mBlobId;
3049 createBlobInfo.blobMem = kBlobMemGuest;
3050 createBlobInfo.blobFlags = kBlobFlagCreateGuestHandle;
3051 vk_append_struct(&structChainIter, &createBlobInfo);
3052
3053 createBlob.blobMem = kBlobMemGuest;
3054 createBlob.flags = kBlobFlagCreateGuestHandle;
3055 createBlob.blobId = createBlobInfo.blobId;
3056 createBlob.size = hostAllocationInfo.allocationSize;
3057
3058 guestBlob = instance->createBlob(createBlob);
3059 if (!guestBlob) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003060 mesa_loge("Failed to allocate coherent memory: failed to create blob.");
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003061 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3062 }
3063
3064 placeholderCmd.hdr.opCode = GFXSTREAM_PLACEHOLDER_COMMAND_VK;
3065 exec.command = static_cast<void*>(&placeholderCmd);
3066 exec.command_size = sizeof(placeholderCmd);
3067 exec.flags = kRingIdx;
3068 exec.ring_idx = 1;
Tom Cherry772e6ac2023-04-12 13:38:29 -07003069 if (instance->execBuffer(exec, guestBlob.get())) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003070 mesa_loge("Failed to allocate coherent memory: failed to execbuffer for wait.");
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003071 return VK_ERROR_OUT_OF_HOST_MEMORY;
3072 }
3073
3074 guestBlob->wait();
3075 } else if (mCaps.vulkanCapset.deferredMapping) {
3076 createBlobInfo.blobId = ++mBlobId;
3077 createBlobInfo.blobMem = kBlobMemHost3d;
3078 vk_append_struct(&structChainIter, &createBlobInfo);
3079 }
3080
3081 VkDeviceMemory mem = VK_NULL_HANDLE;
3082 VkResult host_res =
3083 enc->vkAllocateMemory(device, &hostAllocationInfo, nullptr, &mem, true /* do lock */);
3084 if (host_res != VK_SUCCESS) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003085 mesa_loge("Failed to allocate coherent memory: failed to allocate on the host: %d.",
3086 host_res);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003087 return host_res;
3088 }
3089
3090 struct VkDeviceMemory_Info info;
3091 if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle]) {
3092 info.allocationSize = pAllocateInfo->allocationSize;
3093 info.blobId = createBlobInfo.blobId;
3094 }
3095
3096 if (guestBlob) {
3097 auto mapping = guestBlob->createMapping();
3098 if (!mapping) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003099 mesa_loge("Failed to allocate coherent memory: failed to create blob mapping.");
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003100 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3101 }
3102
3103 auto coherentMemory = std::make_shared<CoherentMemory>(
3104 mapping, hostAllocationInfo.allocationSize, device, mem);
3105
3106 coherentMemory->subAllocate(pAllocateInfo->allocationSize, &ptr, offset);
3107 info.coherentMemoryOffset = offset;
3108 info.coherentMemory = coherentMemory;
3109 info.ptr = ptr;
3110 }
3111
3112 info.coherentMemorySize = hostAllocationInfo.allocationSize;
3113 info.memoryTypeIndex = hostAllocationInfo.memoryTypeIndex;
3114 info.device = device;
3115 info.dedicated = dedicated;
3116 {
3117 // createCoherentMemory inside need to access info_VkDeviceMemory
3118 // information. set it before use.
3119 AutoLock<RecursiveLock> lock(mLock);
3120 info_VkDeviceMemory[mem] = info;
3121 }
3122
3123 if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle]) {
3124 *pMemory = mem;
3125 return host_res;
3126 }
3127
3128 auto coherentMemory = createCoherentMemory(device, mem, hostAllocationInfo, enc, host_res);
3129 if (coherentMemory) {
3130 AutoLock<RecursiveLock> lock(mLock);
3131 coherentMemory->subAllocate(pAllocateInfo->allocationSize, &ptr, offset);
3132 info.allocationSize = pAllocateInfo->allocationSize;
3133 info.coherentMemoryOffset = offset;
3134 info.coherentMemory = coherentMemory;
3135 info.ptr = ptr;
3136 info_VkDeviceMemory[mem] = info;
3137 *pMemory = mem;
3138 } else {
3139 enc->vkFreeMemory(device, mem, nullptr, true);
3140 AutoLock<RecursiveLock> lock(mLock);
3141 info_VkDeviceMemory.erase(mem);
3142 }
3143 return host_res;
3144}
3145
3146VkResult ResourceTracker::getCoherentMemory(const VkMemoryAllocateInfo* pAllocateInfo,
3147 VkEncoder* enc, VkDevice device,
3148 VkDeviceMemory* pMemory) {
3149 VkMemoryAllocateFlagsInfo allocFlagsInfo;
3150 VkMemoryOpaqueCaptureAddressAllocateInfo opaqueCaptureAddressAllocInfo;
3151
3152 // Add buffer device address capture structs
3153 const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr =
3154 vk_find_struct<VkMemoryAllocateFlagsInfo>(pAllocateInfo);
3155
3156 bool dedicated =
3157 allocFlagsInfoPtr &&
3158 ((allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT) ||
3159 (allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT));
3160
3161 if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle])
3162 dedicated = true;
3163
3164 CoherentMemoryPtr coherentMemory = nullptr;
3165 uint8_t* ptr = nullptr;
3166 uint64_t offset = 0;
3167 {
3168 AutoLock<RecursiveLock> lock(mLock);
3169 for (const auto& [memory, info] : info_VkDeviceMemory) {
Lars Harrison18a161a2024-02-03 02:18:23 +00003170 if (info.device != device) continue;
3171
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003172 if (info.memoryTypeIndex != pAllocateInfo->memoryTypeIndex) continue;
3173
3174 if (info.dedicated || dedicated) continue;
3175
3176 if (!info.coherentMemory) continue;
3177
3178 if (!info.coherentMemory->subAllocate(pAllocateInfo->allocationSize, &ptr, offset))
3179 continue;
3180
3181 coherentMemory = info.coherentMemory;
3182 break;
3183 }
3184 if (coherentMemory) {
3185 struct VkDeviceMemory_Info info;
3186 info.coherentMemoryOffset = offset;
3187 info.ptr = ptr;
3188 info.memoryTypeIndex = pAllocateInfo->memoryTypeIndex;
3189 info.allocationSize = pAllocateInfo->allocationSize;
3190 info.coherentMemory = coherentMemory;
3191 info.device = device;
3192
3193 // for suballocated memory, create an alias VkDeviceMemory handle for application
3194 // memory used for suballocations will still be VkDeviceMemory associated with
3195 // CoherentMemory
3196 auto mem = new_from_host_VkDeviceMemory(VK_NULL_HANDLE);
3197 info_VkDeviceMemory[mem] = info;
3198 *pMemory = mem;
3199 return VK_SUCCESS;
3200 }
3201 }
3202 return allocateCoherentMemory(device, pAllocateInfo, enc, pMemory);
3203}
3204
3205VkResult ResourceTracker::on_vkAllocateMemory(void* context, VkResult input_result, VkDevice device,
3206 const VkMemoryAllocateInfo* pAllocateInfo,
3207 const VkAllocationCallbacks* pAllocator,
3208 VkDeviceMemory* pMemory) {
3209#define _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(result) \
3210 { \
3211 auto it = info_VkDevice.find(device); \
3212 if (it == info_VkDevice.end()) return result; \
3213 emitDeviceMemoryReport(it->second, \
3214 VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT, 0, \
3215 pAllocateInfo->allocationSize, VK_OBJECT_TYPE_DEVICE_MEMORY, 0, \
3216 pAllocateInfo->memoryTypeIndex); \
3217 return result; \
3218 }
3219
3220#define _RETURN_SCUCCESS_WITH_DEVICE_MEMORY_REPORT \
3221 { \
3222 uint64_t memoryObjectId = (uint64_t)(void*)*pMemory; \
3223 if (ahw) { \
3224 memoryObjectId = getAHardwareBufferId(ahw); \
3225 } \
3226 emitDeviceMemoryReport(info_VkDevice[device], \
3227 isImport ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT \
3228 : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT, \
3229 memoryObjectId, pAllocateInfo->allocationSize, \
3230 VK_OBJECT_TYPE_DEVICE_MEMORY, (uint64_t)(void*)*pMemory, \
3231 pAllocateInfo->memoryTypeIndex); \
3232 return VK_SUCCESS; \
3233 }
3234
3235 if (input_result != VK_SUCCESS) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(input_result);
3236
3237 VkEncoder* enc = (VkEncoder*)context;
3238
Gurchetan Singh46aac192024-06-26 16:31:32 -07003239 bool hasDedicatedImage = false;
3240 bool hasDedicatedBuffer = false;
3241
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003242 VkMemoryAllocateInfo finalAllocInfo = vk_make_orphan_copy(*pAllocateInfo);
3243 vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&finalAllocInfo);
3244
3245 VkMemoryAllocateFlagsInfo allocFlagsInfo;
3246 VkMemoryOpaqueCaptureAddressAllocateInfo opaqueCaptureAddressAllocInfo;
3247
3248 // Add buffer device address capture structs
3249 const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr =
3250 vk_find_struct<VkMemoryAllocateFlagsInfo>(pAllocateInfo);
3251 const VkMemoryOpaqueCaptureAddressAllocateInfo* opaqueCaptureAddressAllocInfoPtr =
3252 vk_find_struct<VkMemoryOpaqueCaptureAddressAllocateInfo>(pAllocateInfo);
3253
3254 if (allocFlagsInfoPtr) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003255 mesa_logi("%s: has alloc flags\n", __func__);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003256 allocFlagsInfo = *allocFlagsInfoPtr;
3257 vk_append_struct(&structChainIter, &allocFlagsInfo);
3258 }
3259
3260 if (opaqueCaptureAddressAllocInfoPtr) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003261 mesa_logi("%s: has opaque capture address\n", __func__);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003262 opaqueCaptureAddressAllocInfo = *opaqueCaptureAddressAllocInfoPtr;
3263 vk_append_struct(&structChainIter, &opaqueCaptureAddressAllocInfo);
3264 }
3265
3266 VkMemoryDedicatedAllocateInfo dedicatedAllocInfo;
3267 VkImportColorBufferGOOGLE importCbInfo = {
3268 VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE,
3269 0,
3270 };
3271 VkImportBufferGOOGLE importBufferInfo = {
3272 VK_STRUCTURE_TYPE_IMPORT_BUFFER_GOOGLE,
3273 0,
3274 };
3275 // VkImportPhysicalAddressGOOGLE importPhysAddrInfo = {
3276 // VK_STRUCTURE_TYPE_IMPORT_PHYSICAL_ADDRESS_GOOGLE, 0,
3277 // };
3278
3279 const VkExportMemoryAllocateInfo* exportAllocateInfoPtr =
3280 vk_find_struct<VkExportMemoryAllocateInfo>(pAllocateInfo);
Lingfeng Yanga4b97ac2019-02-19 15:50:26 -08003281
Kaiyi Li6a76b332022-08-23 08:10:59 -07003282#ifdef VK_USE_PLATFORM_ANDROID_KHR
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003283 const VkImportAndroidHardwareBufferInfoANDROID* importAhbInfoPtr =
3284 vk_find_struct<VkImportAndroidHardwareBufferInfoANDROID>(pAllocateInfo);
Kaiyi Li6a76b332022-08-23 08:10:59 -07003285#else
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003286 const void* importAhbInfoPtr = nullptr;
Kaiyi Li6a76b332022-08-23 08:10:59 -07003287#endif
Lingfeng Yanga4b97ac2019-02-19 15:50:26 -08003288
Gurchetan Singhc4444b82023-09-19 08:06:20 -07003289#if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
3290 const VkImportMemoryFdInfoKHR* importFdInfoPtr =
3291 vk_find_struct<VkImportMemoryFdInfoKHR>(pAllocateInfo);
3292#else
3293 const VkImportMemoryFdInfoKHR* importFdInfoPtr = nullptr;
3294#endif
3295
Yilong Lid5a6c162022-01-04 01:03:12 -08003296#ifdef VK_USE_PLATFORM_FUCHSIA
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003297 const VkImportMemoryBufferCollectionFUCHSIA* importBufferCollectionInfoPtr =
3298 vk_find_struct<VkImportMemoryBufferCollectionFUCHSIA>(pAllocateInfo);
Yilong Lie12328f2022-01-06 03:32:13 -08003299
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003300 const VkImportMemoryZirconHandleInfoFUCHSIA* importVmoInfoPtr =
3301 vk_find_struct<VkImportMemoryZirconHandleInfoFUCHSIA>(pAllocateInfo);
Yilong Lid5a6c162022-01-04 01:03:12 -08003302#else
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003303 const void* importBufferCollectionInfoPtr = nullptr;
3304 const void* importVmoInfoPtr = nullptr;
Yilong Lid5a6c162022-01-04 01:03:12 -08003305#endif // VK_USE_PLATFORM_FUCHSIA
Lingfeng Yanga4b97ac2019-02-19 15:50:26 -08003306
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003307 const VkMemoryDedicatedAllocateInfo* dedicatedAllocInfoPtr =
3308 vk_find_struct<VkMemoryDedicatedAllocateInfo>(pAllocateInfo);
Lingfeng Yanga4b97ac2019-02-19 15:50:26 -08003309
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003310 // Note for AHardwareBuffers, the Vulkan spec states:
3311 //
3312 // Android hardware buffers have intrinsic width, height, format, and usage
3313 // properties, so Vulkan images bound to memory imported from an Android
3314 // hardware buffer must use dedicated allocations
3315 //
3316 // so any allocation requests with a VkImportAndroidHardwareBufferInfoANDROID
3317 // will necessarily have a VkMemoryDedicatedAllocateInfo. However, the host
3318 // may or may not actually use a dedicated allocation to emulate
3319 // AHardwareBuffers. As such, the VkMemoryDedicatedAllocateInfo is passed to the
3320 // host and the host will decide whether or not to use it.
Jason Macnakb233a512023-02-23 00:40:01 +00003321
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003322 bool shouldPassThroughDedicatedAllocInfo =
3323 !exportAllocateInfoPtr && !importBufferCollectionInfoPtr && !importVmoInfoPtr;
Yilong Li2753c7a2021-02-04 12:58:33 -08003324
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003325 const VkPhysicalDeviceMemoryProperties& physicalDeviceMemoryProps =
3326 getPhysicalDeviceMemoryProperties(context, device, VK_NULL_HANDLE);
Jason Macnak894a2932023-03-15 09:41:46 -07003327
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003328 const bool requestedMemoryIsHostVisible =
3329 isHostVisible(&physicalDeviceMemoryProps, pAllocateInfo->memoryTypeIndex);
Jason Macnak894a2932023-03-15 09:41:46 -07003330
Yilong Li32f5c9e2023-03-30 21:37:53 -07003331#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003332 shouldPassThroughDedicatedAllocInfo &= !requestedMemoryIsHostVisible;
Yilong Li2753c7a2021-02-04 12:58:33 -08003333#endif // VK_USE_PLATFORM_FUCHSIA
Lingfeng Yanga4b97ac2019-02-19 15:50:26 -08003334
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003335 if (shouldPassThroughDedicatedAllocInfo && dedicatedAllocInfoPtr) {
3336 dedicatedAllocInfo = vk_make_orphan_copy(*dedicatedAllocInfoPtr);
3337 vk_append_struct(&structChainIter, &dedicatedAllocInfo);
3338 }
Lingfeng Yanga4b97ac2019-02-19 15:50:26 -08003339
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003340 // State needed for import/export.
3341 bool exportAhb = false;
3342 bool exportVmo = false;
Gurchetan Singhc4444b82023-09-19 08:06:20 -07003343 bool exportDmabuf = false;
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003344 bool importAhb = false;
3345 bool importBufferCollection = false;
3346 bool importVmo = false;
Gurchetan Singhc4444b82023-09-19 08:06:20 -07003347 bool importDmabuf = false;
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003348 (void)exportVmo;
Lingfeng Yanga4b97ac2019-02-19 15:50:26 -08003349
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003350 // Even if we export allocate, the underlying operation
3351 // for the host is always going to be an import operation.
3352 // This is also how Intel's implementation works,
3353 // and is generally simpler;
3354 // even in an export allocation,
3355 // we perform AHardwareBuffer allocation
3356 // on the guest side, at this layer,
3357 // and then we attach a new VkDeviceMemory
3358 // to the AHardwareBuffer on the host via an "import" operation.
3359 AHardwareBuffer* ahw = nullptr;
Lingfeng Yanga4b97ac2019-02-19 15:50:26 -08003360
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003361 if (exportAllocateInfoPtr) {
3362 exportAhb = exportAllocateInfoPtr->handleTypes &
3363 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
Yilong Lid5a6c162022-01-04 01:03:12 -08003364#ifdef VK_USE_PLATFORM_FUCHSIA
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003365 exportVmo = exportAllocateInfoPtr->handleTypes &
3366 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
Yilong Lid5a6c162022-01-04 01:03:12 -08003367#endif // VK_USE_PLATFORM_FUCHSIA
Gurchetan Singhc4444b82023-09-19 08:06:20 -07003368 exportDmabuf =
3369 exportAllocateInfoPtr->handleTypes & (VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
3370 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003371 } else if (importAhbInfoPtr) {
3372 importAhb = true;
3373 } else if (importBufferCollectionInfoPtr) {
3374 importBufferCollection = true;
3375 } else if (importVmoInfoPtr) {
3376 importVmo = true;
3377 }
Gurchetan Singhc4444b82023-09-19 08:06:20 -07003378
3379 if (importFdInfoPtr) {
3380 importDmabuf =
3381 (importFdInfoPtr->handleType & (VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
3382 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT));
3383 }
3384 bool isImport = importAhb || importBufferCollection || importVmo || importDmabuf;
Lingfeng Yanga4b97ac2019-02-19 15:50:26 -08003385
Craig Stouted464d72022-07-13 14:57:38 -07003386#if defined(VK_USE_PLATFORM_ANDROID_KHR)
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003387 if (exportAhb) {
Gurchetan Singh46aac192024-06-26 16:31:32 -07003388 hasDedicatedImage =
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003389 dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->image != VK_NULL_HANDLE);
Gurchetan Singh46aac192024-06-26 16:31:32 -07003390 hasDedicatedBuffer =
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003391 dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE);
3392 VkExtent3D imageExtent = {0, 0, 0};
3393 uint32_t imageLayers = 0;
3394 VkFormat imageFormat = VK_FORMAT_UNDEFINED;
3395 VkImageUsageFlags imageUsage = 0;
3396 VkImageCreateFlags imageCreateFlags = 0;
3397 VkDeviceSize bufferSize = 0;
3398 VkDeviceSize allocationInfoAllocSize = finalAllocInfo.allocationSize;
Lingfeng Yanga4b97ac2019-02-19 15:50:26 -08003399
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003400 if (hasDedicatedImage) {
3401 AutoLock<RecursiveLock> lock(mLock);
Lingfeng Yanga4b97ac2019-02-19 15:50:26 -08003402
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003403 auto it = info_VkImage.find(dedicatedAllocInfoPtr->image);
3404 if (it == info_VkImage.end())
3405 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3406 const auto& info = it->second;
3407 const auto& imgCi = info.createInfo;
Lingfeng Yanga4b97ac2019-02-19 15:50:26 -08003408
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003409 imageExtent = imgCi.extent;
3410 imageLayers = imgCi.arrayLayers;
3411 imageFormat = imgCi.format;
3412 imageUsage = imgCi.usage;
3413 imageCreateFlags = imgCi.flags;
Lingfeng Yanga4b97ac2019-02-19 15:50:26 -08003414 }
3415
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003416 if (hasDedicatedBuffer) {
3417 AutoLock<RecursiveLock> lock(mLock);
3418
3419 auto it = info_VkBuffer.find(dedicatedAllocInfoPtr->buffer);
3420 if (it == info_VkBuffer.end())
3421 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3422 const auto& info = it->second;
3423 const auto& bufCi = info.createInfo;
3424
3425 bufferSize = bufCi.size;
Lingfeng Yanga4b97ac2019-02-19 15:50:26 -08003426 }
3427
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003428 VkResult ahbCreateRes = createAndroidHardwareBuffer(
3429 ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper(),
3430 hasDedicatedImage, hasDedicatedBuffer, imageExtent, imageLayers, imageFormat,
3431 imageUsage, imageCreateFlags, bufferSize, allocationInfoAllocSize, &ahw);
Jason Macnak485e4be92022-06-14 16:33:46 -07003432
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003433 if (ahbCreateRes != VK_SUCCESS) {
3434 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(ahbCreateRes);
Lingfeng Yanga4b97ac2019-02-19 15:50:26 -08003435 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003436 }
3437
3438 if (importAhb) {
3439 ahw = importAhbInfoPtr->buffer;
3440 // We still need to acquire the AHardwareBuffer.
3441 importAndroidHardwareBuffer(
3442 ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper(),
3443 importAhbInfoPtr, nullptr);
3444 }
3445
3446 if (ahw) {
3447 auto* gralloc =
3448 ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper();
3449
3450 const uint32_t hostHandle = gralloc->getHostHandle(ahw);
Yahan Zhou79ab57a2023-09-14 16:24:26 -07003451 if (gralloc->getFormat(ahw) == AHARDWAREBUFFER_FORMAT_BLOB &&
3452 !gralloc->treatBlobAsImage()) {
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003453 importBufferInfo.buffer = hostHandle;
3454 vk_append_struct(&structChainIter, &importBufferInfo);
3455 } else {
3456 importCbInfo.colorBuffer = hostHandle;
3457 vk_append_struct(&structChainIter, &importCbInfo);
3458 }
3459 }
Craig Stouted464d72022-07-13 14:57:38 -07003460#endif
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003461 zx_handle_t vmo_handle = ZX_HANDLE_INVALID;
David Revemandf8d9642019-04-24 12:13:36 -04003462
David Revemand5d92d62019-03-29 15:19:25 -04003463#ifdef VK_USE_PLATFORM_FUCHSIA
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003464 if (importBufferCollection) {
3465 const auto& collection =
3466 *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
David Revemand5d92d62019-03-29 15:19:25 -04003467 importBufferCollectionInfoPtr->collection);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003468 auto result = collection->WaitForBuffersAllocated();
3469 if (!result.ok() || result->status != ZX_OK) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003470 mesa_loge("WaitForBuffersAllocated failed: %d %d", result.status(),
3471 GET_STATUS_SAFE(result, status));
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003472 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3473 }
3474 fuchsia_sysmem::wire::BufferCollectionInfo2& info = result->buffer_collection_info;
3475 uint32_t index = importBufferCollectionInfoPtr->index;
3476 if (info.buffer_count < index) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003477 mesa_loge("Invalid buffer index: %d %d", index);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003478 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3479 }
3480 vmo_handle = info.buffers[index].vmo.release();
3481 }
3482
3483 if (importVmo) {
3484 vmo_handle = importVmoInfoPtr->handle;
3485 }
3486
3487 if (exportVmo) {
Gurchetan Singh46aac192024-06-26 16:31:32 -07003488 hasDedicatedImage =
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003489 dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->image != VK_NULL_HANDLE);
Gurchetan Singh46aac192024-06-26 16:31:32 -07003490 hasDedicatedBuffer =
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003491 dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE);
3492
3493 if (hasDedicatedImage && hasDedicatedBuffer) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003494 mesa_loge(
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003495 "Invalid VkMemoryDedicatedAllocationInfo: At least one "
3496 "of image and buffer must be VK_NULL_HANDLE.");
3497 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
David Revemand5d92d62019-03-29 15:19:25 -04003498 }
3499
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003500 const VkImageCreateInfo* pImageCreateInfo = nullptr;
3501
3502 VkBufferConstraintsInfoFUCHSIA bufferConstraintsInfo = {
3503 .sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CREATE_INFO_FUCHSIA,
3504 .pNext = nullptr,
3505 .createInfo = {},
3506 .requiredFormatFeatures = 0,
3507 .bufferCollectionConstraints =
3508 VkBufferCollectionConstraintsInfoFUCHSIA{
3509 .sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CONSTRAINTS_INFO_FUCHSIA,
3510 .pNext = nullptr,
3511 .minBufferCount = 1,
3512 .maxBufferCount = 0,
3513 .minBufferCountForCamping = 0,
3514 .minBufferCountForDedicatedSlack = 0,
3515 .minBufferCountForSharedSlack = 0,
3516 },
3517 };
3518 const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo = nullptr;
3519
3520 if (hasDedicatedImage) {
3521 AutoLock<RecursiveLock> lock(mLock);
3522
3523 auto it = info_VkImage.find(dedicatedAllocInfoPtr->image);
3524 if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;
3525 const auto& imageInfo = it->second;
3526
3527 pImageCreateInfo = &imageInfo.createInfo;
David Revemand5d92d62019-03-29 15:19:25 -04003528 }
3529
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003530 if (hasDedicatedBuffer) {
3531 AutoLock<RecursiveLock> lock(mLock);
David Reveman5b7c5842019-02-20 01:06:48 -05003532
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003533 auto it = info_VkBuffer.find(dedicatedAllocInfoPtr->buffer);
3534 if (it == info_VkBuffer.end()) return VK_ERROR_INITIALIZATION_FAILED;
3535 const auto& bufferInfo = it->second;
3536
3537 bufferConstraintsInfo.createInfo = bufferInfo.createInfo;
3538 pBufferConstraintsInfo = &bufferConstraintsInfo;
3539 }
3540
3541 hasDedicatedImage =
3542 hasDedicatedImage && getBufferCollectionConstraintsVulkanImageUsage(pImageCreateInfo);
3543 hasDedicatedBuffer = hasDedicatedBuffer && getBufferCollectionConstraintsVulkanBufferUsage(
3544 pBufferConstraintsInfo);
3545
3546 if (hasDedicatedImage || hasDedicatedBuffer) {
3547 auto token_ends = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollectionToken>();
3548 if (!token_ends.is_ok()) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003549 mesa_loge("zx_channel_create failed: %d", token_ends.status_value());
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003550 abort();
Yilong Lie1fddb52020-07-10 17:56:43 -07003551 }
3552
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003553 {
3554 auto result =
3555 mSysmemAllocator->AllocateSharedCollection(std::move(token_ends->server));
3556 if (!result.ok()) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003557 mesa_loge("AllocateSharedCollection failed: %d", result.status());
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003558 abort();
3559 }
3560 }
Yilong Lie1fddb52020-07-10 17:56:43 -07003561
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003562 auto collection_ends = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollection>();
3563 if (!collection_ends.is_ok()) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003564 mesa_loge("zx_channel_create failed: %d", collection_ends.status_value());
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003565 abort();
3566 }
Yilong Lie1fddb52020-07-10 17:56:43 -07003567
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003568 {
3569 auto result = mSysmemAllocator->BindSharedCollection(
3570 std::move(token_ends->client), std::move(collection_ends->server));
3571 if (!result.ok()) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003572 mesa_loge("BindSharedCollection failed: %d", result.status());
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003573 abort();
3574 }
3575 }
3576
3577 fidl::WireSyncClient<fuchsia_sysmem::BufferCollection> collection(
3578 std::move(collection_ends->client));
David Revemandf8d9642019-04-24 12:13:36 -04003579 if (hasDedicatedImage) {
Mitchell Kemberd9f3e4a2024-01-29 16:53:33 -08003580 // TODO(fxbug.dev/42172354): Use setBufferCollectionImageConstraintsFUCHSIA.
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003581 VkResult res = setBufferCollectionConstraintsFUCHSIA(enc, device, &collection,
3582 pImageCreateInfo);
3583 if (res == VK_ERROR_FORMAT_NOT_SUPPORTED) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003584 mesa_loge("setBufferCollectionConstraints failed: format %u is not supported",
3585 pImageCreateInfo->format);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003586 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3587 }
3588 if (res != VK_SUCCESS) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003589 mesa_loge("setBufferCollectionConstraints failed: %d", res);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003590 abort();
3591 }
David Revemandf8d9642019-04-24 12:13:36 -04003592 }
3593
Yilong Lie1fddb52020-07-10 17:56:43 -07003594 if (hasDedicatedBuffer) {
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003595 VkResult res = setBufferCollectionBufferConstraintsFUCHSIA(&collection,
3596 pBufferConstraintsInfo);
3597 if (res != VK_SUCCESS) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003598 mesa_loge("setBufferCollectionBufferConstraints failed: %d", res);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003599 abort();
3600 }
Yilong Lie1fddb52020-07-10 17:56:43 -07003601 }
3602
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003603 {
3604 auto result = collection->WaitForBuffersAllocated();
3605 if (result.ok() && result->status == ZX_OK) {
3606 fuchsia_sysmem::wire::BufferCollectionInfo2& info =
3607 result->buffer_collection_info;
3608 if (!info.buffer_count) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003609 mesa_loge(
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003610 "WaitForBuffersAllocated returned "
3611 "invalid count: %d",
3612 info.buffer_count);
3613 abort();
3614 }
3615 vmo_handle = info.buffers[0].vmo.release();
3616 } else {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003617 mesa_loge("WaitForBuffersAllocated failed: %d %d", result.status(),
3618 GET_STATUS_SAFE(result, status));
David Revemandf8d9642019-04-24 12:13:36 -04003619 abort();
3620 }
David Reveman5b7c5842019-02-20 01:06:48 -05003621 }
David Reveman5b7c5842019-02-20 01:06:48 -05003622
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003623 collection->Close();
3624
David Revemanaa65ad62019-05-02 08:23:46 -04003625 zx::vmo vmo_copy;
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003626 zx_status_t status = zx_handle_duplicate(vmo_handle, ZX_RIGHT_SAME_RIGHTS,
David Revemanaa65ad62019-05-02 08:23:46 -04003627 vmo_copy.reset_and_get_address());
David Revemandf8d9642019-04-24 12:13:36 -04003628 if (status != ZX_OK) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003629 mesa_loge("Failed to duplicate VMO: %d", status);
David Revemandf8d9642019-04-24 12:13:36 -04003630 abort();
3631 }
Yilong Lif8eda752020-07-01 19:19:45 -07003632
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003633 if (pImageCreateInfo) {
3634 // Only device-local images need to create color buffer; for
3635 // host-visible images, the color buffer is already created
3636 // when sysmem allocates memory. Here we use the |tiling|
3637 // field of image creation info to determine if it uses
3638 // host-visible memory.
3639 bool isLinear = pImageCreateInfo->tiling == VK_IMAGE_TILING_LINEAR;
3640 if (!isLinear) {
3641 fuchsia_hardware_goldfish::wire::ColorBufferFormatType format;
3642 switch (pImageCreateInfo->format) {
3643 case VK_FORMAT_B8G8R8A8_SINT:
3644 case VK_FORMAT_B8G8R8A8_UNORM:
3645 case VK_FORMAT_B8G8R8A8_SRGB:
3646 case VK_FORMAT_B8G8R8A8_SNORM:
3647 case VK_FORMAT_B8G8R8A8_SSCALED:
3648 case VK_FORMAT_B8G8R8A8_USCALED:
3649 format = fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kBgra;
3650 break;
3651 case VK_FORMAT_R8G8B8A8_SINT:
3652 case VK_FORMAT_R8G8B8A8_UNORM:
3653 case VK_FORMAT_R8G8B8A8_SRGB:
3654 case VK_FORMAT_R8G8B8A8_SNORM:
3655 case VK_FORMAT_R8G8B8A8_SSCALED:
3656 case VK_FORMAT_R8G8B8A8_USCALED:
3657 format = fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kRgba;
3658 break;
3659 case VK_FORMAT_R8_UNORM:
3660 case VK_FORMAT_R8_UINT:
3661 case VK_FORMAT_R8_USCALED:
3662 case VK_FORMAT_R8_SNORM:
3663 case VK_FORMAT_R8_SINT:
3664 case VK_FORMAT_R8_SSCALED:
3665 case VK_FORMAT_R8_SRGB:
3666 format =
3667 fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kLuminance;
3668 break;
3669 case VK_FORMAT_R8G8_UNORM:
3670 case VK_FORMAT_R8G8_UINT:
3671 case VK_FORMAT_R8G8_USCALED:
3672 case VK_FORMAT_R8G8_SNORM:
3673 case VK_FORMAT_R8G8_SINT:
3674 case VK_FORMAT_R8G8_SSCALED:
3675 case VK_FORMAT_R8G8_SRGB:
3676 format = fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kRg;
3677 break;
3678 default:
Gurchetan Singh42361f72024-05-16 17:37:11 -07003679 mesa_loge("Unsupported format: %d", pImageCreateInfo->format);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003680 abort();
3681 }
Yilong Li6dc9b5b2020-07-17 21:05:22 -07003682
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003683 fidl::Arena arena;
3684 fuchsia_hardware_goldfish::wire::CreateColorBuffer2Params createParams(arena);
3685 createParams.set_width(pImageCreateInfo->extent.width)
3686 .set_height(pImageCreateInfo->extent.height)
3687 .set_format(format)
3688 .set_memory_property(
3689 fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal);
3690
3691 auto result = mControlDevice->CreateColorBuffer2(std::move(vmo_copy),
3692 std::move(createParams));
3693 if (!result.ok() || result->res != ZX_OK) {
3694 if (result.ok() && result->res == ZX_ERR_ALREADY_EXISTS) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003695 mesa_logd(
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003696 "CreateColorBuffer: color buffer already "
3697 "exists\n");
3698 } else {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003699 mesa_loge("CreateColorBuffer failed: %d:%d", result.status(),
3700 GET_STATUS_SAFE(result, res));
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003701 abort();
3702 }
3703 }
Yilong Li6dc9b5b2020-07-17 21:05:22 -07003704 }
Yilong Lif8eda752020-07-01 19:19:45 -07003705 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003706
3707 if (pBufferConstraintsInfo) {
3708 fidl::Arena arena;
3709 fuchsia_hardware_goldfish::wire::CreateBuffer2Params createParams(arena);
3710 createParams.set_size(arena, pBufferConstraintsInfo->createInfo.size)
3711 .set_memory_property(
3712 fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal);
3713
3714 auto result =
3715 mControlDevice->CreateBuffer2(std::move(vmo_copy), std::move(createParams));
3716 if (!result.ok() || result->is_error()) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003717 mesa_loge("CreateBuffer2 failed: %d:%d", result.status(),
3718 GET_STATUS_SAFE(result, error_value()));
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003719 abort();
3720 }
3721 }
3722 } else {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003723 mesa_logw(
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003724 "Dedicated image / buffer not available. Cannot create "
3725 "BufferCollection to export VMOs.");
3726 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
David Revemandf8d9642019-04-24 12:13:36 -04003727 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003728 }
3729
3730 if (vmo_handle != ZX_HANDLE_INVALID) {
3731 zx::vmo vmo_copy;
3732 zx_status_t status =
3733 zx_handle_duplicate(vmo_handle, ZX_RIGHT_SAME_RIGHTS, vmo_copy.reset_and_get_address());
3734 if (status != ZX_OK) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003735 mesa_loge("Failed to duplicate VMO: %d", status);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003736 abort();
3737 }
3738 zx_status_t status2 = ZX_OK;
3739
3740 auto result = mControlDevice->GetBufferHandle(std::move(vmo_copy));
3741 if (!result.ok() || result->res != ZX_OK) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003742 mesa_loge("GetBufferHandle failed: %d:%d", result.status(),
3743 GET_STATUS_SAFE(result, res));
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003744 } else {
3745 fuchsia_hardware_goldfish::wire::BufferHandleType handle_type = result->type;
3746 uint32_t buffer_handle = result->id;
3747
3748 if (handle_type == fuchsia_hardware_goldfish::wire::BufferHandleType::kBuffer) {
3749 importBufferInfo.buffer = buffer_handle;
3750 vk_append_struct(&structChainIter, &importBufferInfo);
3751 } else {
3752 importCbInfo.colorBuffer = buffer_handle;
3753 vk_append_struct(&structChainIter, &importCbInfo);
3754 }
3755 }
3756 }
David Revemandf8d9642019-04-24 12:13:36 -04003757#endif
3758
Jason Macnak6d3d7b22024-04-01 16:48:53 -07003759 VirtGpuResourcePtr colorBufferBlob = nullptr;
Aaron Ruby42a8b752024-05-27 15:37:53 -04003760#if defined(LINUX_GUEST_BUILD)
Gurchetan Singhc4444b82023-09-19 08:06:20 -07003761 if (exportDmabuf) {
3762 VirtGpuDevice* instance = VirtGpuDevice::getInstance();
Gurchetan Singh46aac192024-06-26 16:31:32 -07003763 hasDedicatedImage =
Gurchetan Singhc4444b82023-09-19 08:06:20 -07003764 dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->image != VK_NULL_HANDLE);
Gurchetan Singh46aac192024-06-26 16:31:32 -07003765 hasDedicatedBuffer =
Gurchetan Singhc4444b82023-09-19 08:06:20 -07003766 dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE);
Gurchetan Singhc4444b82023-09-19 08:06:20 -07003767
3768 if (hasDedicatedImage) {
3769 VkImageCreateInfo imageCreateInfo;
Aaron Ruby42a8b752024-05-27 15:37:53 -04003770 bool isDmaBufImage = false;
Gurchetan Singhc4444b82023-09-19 08:06:20 -07003771 {
3772 AutoLock<RecursiveLock> lock(mLock);
3773
3774 auto it = info_VkImage.find(dedicatedAllocInfoPtr->image);
3775 if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;
3776 const auto& imageInfo = it->second;
3777
3778 imageCreateInfo = imageInfo.createInfo;
Aaron Ruby42a8b752024-05-27 15:37:53 -04003779 isDmaBufImage = imageInfo.isDmaBufImage;
Gurchetan Singhc4444b82023-09-19 08:06:20 -07003780 }
Aaron Ruby379a5ed2024-04-23 14:57:55 -04003781
Aaron Ruby42a8b752024-05-27 15:37:53 -04003782 // TODO (b/326956485): Support DRM format modifiers for dmabuf memory
3783 // For now, can only externalize memory for linear images
3784 if (isDmaBufImage) {
3785 const VkImageSubresource imageSubresource = {
3786 .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
3787 .mipLevel = 0,
3788 .arrayLayer = 0,
3789 };
3790 VkSubresourceLayout subResourceLayout;
Aaron Rubya8140f42024-05-10 15:50:44 -04003791 on_vkGetImageSubresourceLayout(context, device, dedicatedAllocInfoPtr->image,
3792 &imageSubresource, &subResourceLayout);
Aaron Ruby42a8b752024-05-27 15:37:53 -04003793 if (!subResourceLayout.rowPitch) {
Gurchetan Singh32cf8492024-06-14 23:14:36 -07003794 mesa_loge("Failed to query stride for VirtGpu resource creation.");
Aaron Ruby42a8b752024-05-27 15:37:53 -04003795 return VK_ERROR_INITIALIZATION_FAILED;
3796 }
Gurchetan Singhc4444b82023-09-19 08:06:20 -07003797
Aaron Ruby42a8b752024-05-27 15:37:53 -04003798 uint32_t virglFormat = gfxstream::vk::getVirglFormat(imageCreateInfo.format);
3799 if (!virglFormat) {
3800 mesa_loge("Unsupported VK format for VirtGpu resource, vkFormat: 0x%x",
3801 imageCreateInfo.format);
3802 return VK_ERROR_FORMAT_NOT_SUPPORTED;
3803 }
3804 const uint32_t target = PIPE_TEXTURE_2D;
3805 uint32_t bind = VIRGL_BIND_RENDER_TARGET;
3806 if (VK_IMAGE_TILING_LINEAR == imageCreateInfo.tiling) {
3807 bind |= VIRGL_BIND_LINEAR;
3808 }
3809 colorBufferBlob = instance->createResource(
3810 imageCreateInfo.extent.width, imageCreateInfo.extent.height,
3811 subResourceLayout.rowPitch, virglFormat, target, bind);
3812 if (!colorBufferBlob) {
3813 mesa_loge("Failed to create colorBuffer resource for Image memory");
3814 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3815 }
3816 if (!colorBufferBlob->wait()) {
3817 mesa_loge("Failed to wait for colorBuffer resource for Image memory");
3818 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3819 }
3820 } else {
3821 mesa_logw(
3822 "The VkMemoryDedicatedAllocateInfo::image associated with VkDeviceMemory "
3823 "allocation cannot be used to create exportable resource "
3824 "(VkExportMemoryAllocateInfo).\n");
Gurchetan Singhc4444b82023-09-19 08:06:20 -07003825 }
Aaron Ruby42a8b752024-05-27 15:37:53 -04003826 } else if (hasDedicatedBuffer) {
3827 mesa_logw(
3828 "VkDeviceMemory allocated with VkMemoryDedicatedAllocateInfo::buffer cannot be "
3829 "exported (VkExportMemoryAllocateInfo)");
3830 } else {
3831 mesa_logw(
3832 "VkDeviceMemory is not exportable (VkExportMemoryAllocateInfo). Requires "
3833 "VkMemoryDedicatedAllocateInfo::image to create external resource.");
Gurchetan Singhc4444b82023-09-19 08:06:20 -07003834 }
3835 }
3836
3837 if (importDmabuf) {
3838 VirtGpuExternalHandle importHandle = {};
3839 importHandle.osHandle = importFdInfoPtr->fd;
3840 importHandle.type = kMemHandleDmabuf;
3841
3842 auto instance = VirtGpuDevice::getInstance();
3843 colorBufferBlob = instance->importBlob(importHandle);
3844 if (!colorBufferBlob) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003845 mesa_loge("%s: Failed to import colorBuffer resource\n", __func__);
Gurchetan Singhc4444b82023-09-19 08:06:20 -07003846 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3847 }
3848 }
3849
3850 if (colorBufferBlob) {
3851 importCbInfo.colorBuffer = colorBufferBlob->getResourceHandle();
3852 vk_append_struct(&structChainIter, &importCbInfo);
3853 }
3854#endif
3855
3856 if (ahw || colorBufferBlob || !requestedMemoryIsHostVisible) {
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003857 input_result =
3858 enc->vkAllocateMemory(device, &finalAllocInfo, pAllocator, pMemory, true /* do lock */);
Lingfeng Yang35e9c6a2018-12-25 17:13:36 -08003859
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003860 if (input_result != VK_SUCCESS) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(input_result);
Lingfeng Yang35e9c6a2018-12-25 17:13:36 -08003861
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003862 VkDeviceSize allocationSize = finalAllocInfo.allocationSize;
3863 setDeviceMemoryInfo(device, *pMemory, 0, nullptr, finalAllocInfo.memoryTypeIndex, ahw,
Gurchetan Singhc4444b82023-09-19 08:06:20 -07003864 isImport, vmo_handle, colorBufferBlob);
Lingfeng Yang35e9c6a2018-12-25 17:13:36 -08003865
Yahan Zhou285f8572021-06-09 18:35:04 -07003866 _RETURN_SCUCCESS_WITH_DEVICE_MEMORY_REPORT;
Lingfeng Yang131d5a42018-11-30 12:00:33 -08003867 }
3868
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003869#ifdef VK_USE_PLATFORM_FUCHSIA
3870 if (vmo_handle != ZX_HANDLE_INVALID) {
3871 input_result =
3872 enc->vkAllocateMemory(device, &finalAllocInfo, pAllocator, pMemory, true /* do lock */);
Shalini Sdb704c92023-01-27 21:35:33 +00003873
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003874 // Get VMO handle rights, and only use allowed rights to map the
3875 // host memory.
3876 zx_info_handle_basic handle_info;
3877 zx_status_t status = zx_object_get_info(vmo_handle, ZX_INFO_HANDLE_BASIC, &handle_info,
3878 sizeof(handle_info), nullptr, nullptr);
3879 if (status != ZX_OK) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003880 mesa_loge("%s: cannot get vmo object info: vmo = %u status: %d.", __func__, vmo_handle,
3881 status);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003882 return VK_ERROR_OUT_OF_HOST_MEMORY;
Shalini Sdb704c92023-01-27 21:35:33 +00003883 }
3884
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003885 zx_vm_option_t vm_permission = 0u;
3886 vm_permission |= (handle_info.rights & ZX_RIGHT_READ) ? ZX_VM_PERM_READ : 0;
3887 vm_permission |= (handle_info.rights & ZX_RIGHT_WRITE) ? ZX_VM_PERM_WRITE : 0;
3888
3889 zx_paddr_t addr;
3890 status = zx_vmar_map(zx_vmar_root_self(), vm_permission, 0, vmo_handle, 0,
3891 finalAllocInfo.allocationSize, &addr);
3892 if (status != ZX_OK) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003893 mesa_loge("%s: cannot map vmar: status %d.", __func__, status);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003894 return VK_ERROR_OUT_OF_HOST_MEMORY;
3895 }
3896
3897 setDeviceMemoryInfo(device, *pMemory, finalAllocInfo.allocationSize,
3898 reinterpret_cast<uint8_t*>(addr), finalAllocInfo.memoryTypeIndex,
Gurchetan Singhc4444b82023-09-19 08:06:20 -07003899 /*ahw=*/nullptr, isImport, vmo_handle, /*blobPtr=*/nullptr);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003900 return VK_SUCCESS;
Shalini Sdb704c92023-01-27 21:35:33 +00003901 }
Kaiyi Li6a76b332022-08-23 08:10:59 -07003902#endif
Shalini Sdb704c92023-01-27 21:35:33 +00003903
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003904 // Host visible memory with direct mapping
3905 VkResult result = getCoherentMemory(&finalAllocInfo, enc, device, pMemory);
3906 if (result != VK_SUCCESS) return result;
3907
3908 _RETURN_SCUCCESS_WITH_DEVICE_MEMORY_REPORT;
3909}
3910
3911void ResourceTracker::on_vkFreeMemory(void* context, VkDevice device, VkDeviceMemory memory,
3912 const VkAllocationCallbacks* pAllocateInfo) {
3913 AutoLock<RecursiveLock> lock(mLock);
3914
3915 auto it = info_VkDeviceMemory.find(memory);
3916 if (it == info_VkDeviceMemory.end()) return;
3917 auto& info = it->second;
3918 uint64_t memoryObjectId = (uint64_t)(void*)memory;
3919#ifdef VK_USE_PLATFORM_ANDROID_KHR
3920 if (info.ahw) {
3921 memoryObjectId = getAHardwareBufferId(info.ahw);
3922 }
3923#endif
3924
3925 emitDeviceMemoryReport(info_VkDevice[device],
3926 info.imported ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT
3927 : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT,
3928 memoryObjectId, 0 /* size */, VK_OBJECT_TYPE_DEVICE_MEMORY,
3929 (uint64_t)(void*)memory);
Lingfeng Yang35e9c6a2018-12-25 17:13:36 -08003930
Yilong Li1ce350b2020-09-12 18:55:55 -07003931#ifdef VK_USE_PLATFORM_FUCHSIA
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003932 if (info.vmoHandle && info.ptr) {
3933 zx_status_t status = zx_vmar_unmap(
3934 zx_vmar_root_self(), reinterpret_cast<zx_paddr_t>(info.ptr), info.allocationSize);
3935 if (status != ZX_OK) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003936 mesa_loge("%s: Cannot unmap ptr: status %d", status);
Yilong Li1ce350b2020-09-12 18:55:55 -07003937 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003938 info.ptr = nullptr;
3939 }
Yilong Li1ce350b2020-09-12 18:55:55 -07003940#endif
3941
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003942 if (!info.coherentMemory) {
Shalini Sdb704c92023-01-27 21:35:33 +00003943 lock.unlock();
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003944 VkEncoder* enc = (VkEncoder*)context;
3945 enc->vkFreeMemory(device, memory, pAllocateInfo, true /* do lock */);
3946 return;
Lingfeng Yange9979522018-12-25 14:44:52 -08003947 }
3948
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003949 auto coherentMemory = freeCoherentMemoryLocked(memory, info);
Lingfeng Yangdef88ba2018-12-13 12:43:17 -08003950
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003951 // We have to release the lock before we could possibly free a
3952 // CoherentMemory, because that will call into VkEncoder, which
3953 // shouldn't be called when the lock is held.
3954 lock.unlock();
3955 coherentMemory = nullptr;
3956}
Lingfeng Yangdef88ba2018-12-13 12:43:17 -08003957
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003958VkResult ResourceTracker::on_vkMapMemory(void* context, VkResult host_result, VkDevice device,
3959 VkDeviceMemory memory, VkDeviceSize offset,
3960 VkDeviceSize size, VkMemoryMapFlags, void** ppData) {
3961 if (host_result != VK_SUCCESS) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003962 mesa_loge("%s: Host failed to map", __func__);
Lingfeng Yangdef88ba2018-12-13 12:43:17 -08003963 return host_result;
3964 }
3965
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003966 AutoLock<RecursiveLock> lock(mLock);
3967
Jason Macnak24e20f82024-04-26 15:54:09 -07003968 auto deviceMemoryInfoIt = info_VkDeviceMemory.find(memory);
3969 if (deviceMemoryInfoIt == info_VkDeviceMemory.end()) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003970 mesa_loge("%s: Failed to find VkDeviceMemory.", __func__);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003971 return VK_ERROR_MEMORY_MAP_FAILED;
Lingfeng Yangdef88ba2018-12-13 12:43:17 -08003972 }
Jason Macnak24e20f82024-04-26 15:54:09 -07003973 auto& deviceMemoryInfo = deviceMemoryInfoIt->second;
Lingfeng Yangdef88ba2018-12-13 12:43:17 -08003974
Jason Macnak24e20f82024-04-26 15:54:09 -07003975 if (deviceMemoryInfo.blobId && !deviceMemoryInfo.coherentMemory &&
3976 !mCaps.params[kParamCreateGuestHandle]) {
3977 // NOTE: must not hold lock while calling into the encoder.
3978 lock.unlock();
Lingfeng Yang4af5f322019-02-14 08:10:28 -08003979 VkEncoder* enc = (VkEncoder*)context;
Jason Macnak24e20f82024-04-26 15:54:09 -07003980 VkResult vkResult = enc->vkGetBlobGOOGLE(device, memory, /*doLock*/ false);
3981 if (vkResult != VK_SUCCESS) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07003982 mesa_loge("%s: Failed to vkGetBlobGOOGLE().", __func__);
Jason Macnak24e20f82024-04-26 15:54:09 -07003983 return vkResult;
3984 }
3985 lock.lock();
Lingfeng Yang3e87e852019-02-19 14:12:49 -08003986
Jason Macnak24e20f82024-04-26 15:54:09 -07003987 // NOTE: deviceMemoryInfoIt potentially invalidated but deviceMemoryInfo still okay.
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003988
3989 struct VirtGpuCreateBlob createBlob = {};
3990 createBlob.blobMem = kBlobMemHost3d;
3991 createBlob.flags = kBlobFlagMappable;
Jason Macnak24e20f82024-04-26 15:54:09 -07003992 createBlob.blobId = deviceMemoryInfo.blobId;
3993 createBlob.size = deviceMemoryInfo.coherentMemorySize;
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003994
Jason Macnak24e20f82024-04-26 15:54:09 -07003995 auto blob = VirtGpuDevice::getInstance()->createBlob(createBlob);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003996 if (!blob) return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3997
Jason Macnak24e20f82024-04-26 15:54:09 -07003998 VirtGpuResourceMappingPtr mapping = blob->createMapping();
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07003999 if (!mapping) return VK_ERROR_OUT_OF_DEVICE_MEMORY;
4000
4001 auto coherentMemory =
4002 std::make_shared<CoherentMemory>(mapping, createBlob.size, device, memory);
4003
Jason Macnak24e20f82024-04-26 15:54:09 -07004004 uint8_t* ptr;
4005 uint64_t offset;
4006 coherentMemory->subAllocate(deviceMemoryInfo.allocationSize, &ptr, offset);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004007
Jason Macnak24e20f82024-04-26 15:54:09 -07004008 deviceMemoryInfo.coherentMemoryOffset = offset;
4009 deviceMemoryInfo.coherentMemory = coherentMemory;
4010 deviceMemoryInfo.ptr = ptr;
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004011 }
4012
Jason Macnak24e20f82024-04-26 15:54:09 -07004013 if (!deviceMemoryInfo.ptr) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07004014 mesa_loge("%s: VkDeviceMemory has nullptr.", __func__);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004015 return VK_ERROR_MEMORY_MAP_FAILED;
4016 }
4017
Jason Macnak24e20f82024-04-26 15:54:09 -07004018 if (size != VK_WHOLE_SIZE && (deviceMemoryInfo.ptr + offset + size >
4019 deviceMemoryInfo.ptr + deviceMemoryInfo.allocationSize)) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07004020 mesa_loge(
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004021 "%s: size is too big. alloc size 0x%llx while we wanted offset 0x%llx size 0x%llx "
Jason Macnak24e20f82024-04-26 15:54:09 -07004022 "total 0x%llx",
4023 __func__, (unsigned long long)deviceMemoryInfo.allocationSize,
4024 (unsigned long long)offset, (unsigned long long)size, (unsigned long long)offset);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004025 return VK_ERROR_MEMORY_MAP_FAILED;
4026 }
4027
Jason Macnak24e20f82024-04-26 15:54:09 -07004028 *ppData = deviceMemoryInfo.ptr + offset;
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004029
4030 return host_result;
4031}
4032
4033void ResourceTracker::on_vkUnmapMemory(void*, VkDevice, VkDeviceMemory) {
4034 // no-op
4035}
4036
4037void ResourceTracker::transformImageMemoryRequirements2ForGuest(VkImage image,
4038 VkMemoryRequirements2* reqs2) {
4039 AutoLock<RecursiveLock> lock(mLock);
4040
4041 auto it = info_VkImage.find(image);
4042 if (it == info_VkImage.end()) return;
4043
4044 auto& info = it->second;
4045
4046 if (!info.external || !info.externalCreateInfo.handleTypes) {
Gurchetan Singhc4444b82023-09-19 08:06:20 -07004047 transformImageMemoryRequirementsForGuestLocked(image, &reqs2->memoryRequirements);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004048 return;
4049 }
4050
Gurchetan Singhc4444b82023-09-19 08:06:20 -07004051 transformImageMemoryRequirementsForGuestLocked(image, &reqs2->memoryRequirements);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004052
4053 VkMemoryDedicatedRequirements* dedicatedReqs =
4054 vk_find_struct<VkMemoryDedicatedRequirements>(reqs2);
4055
4056 if (!dedicatedReqs) return;
4057
4058 transformExternalResourceMemoryDedicatedRequirementsForGuest(dedicatedReqs);
4059}
4060
4061void ResourceTracker::transformBufferMemoryRequirements2ForGuest(VkBuffer buffer,
4062 VkMemoryRequirements2* reqs2) {
4063 AutoLock<RecursiveLock> lock(mLock);
4064
4065 auto it = info_VkBuffer.find(buffer);
4066 if (it == info_VkBuffer.end()) return;
4067
4068 auto& info = it->second;
4069
4070 if (!info.external || !info.externalCreateInfo.handleTypes) {
4071 return;
4072 }
4073
4074 VkMemoryDedicatedRequirements* dedicatedReqs =
4075 vk_find_struct<VkMemoryDedicatedRequirements>(reqs2);
4076
4077 if (!dedicatedReqs) return;
4078
4079 transformExternalResourceMemoryDedicatedRequirementsForGuest(dedicatedReqs);
4080}
4081
4082VkResult ResourceTracker::on_vkCreateImage(void* context, VkResult, VkDevice device,
4083 const VkImageCreateInfo* pCreateInfo,
4084 const VkAllocationCallbacks* pAllocator,
4085 VkImage* pImage) {
4086 VkEncoder* enc = (VkEncoder*)context;
4087
4088 VkImageCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
Yahan Zhoud25d7a12023-10-24 10:46:30 -07004089 if (localCreateInfo.sharingMode != VK_SHARING_MODE_CONCURRENT) {
4090 localCreateInfo.queueFamilyIndexCount = 0;
4091 localCreateInfo.pQueueFamilyIndices = nullptr;
4092 }
4093
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004094 vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
4095 VkExternalMemoryImageCreateInfo localExtImgCi;
4096
4097 const VkExternalMemoryImageCreateInfo* extImgCiPtr =
4098 vk_find_struct<VkExternalMemoryImageCreateInfo>(pCreateInfo);
Gurchetan Singhc4444b82023-09-19 08:06:20 -07004099
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004100 if (extImgCiPtr) {
4101 localExtImgCi = vk_make_orphan_copy(*extImgCiPtr);
4102 vk_append_struct(&structChainIter, &localExtImgCi);
4103 }
Lingfeng Yang5c701122019-03-05 08:34:46 -08004104
Aaron Ruby42a8b752024-05-27 15:37:53 -04004105#if defined(LINUX_GUEST_BUILD)
4106 bool isDmaBufImage = false;
Gurchetan Singhc4444b82023-09-19 08:06:20 -07004107 if (extImgCiPtr &&
4108 (extImgCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)) {
Aaron Ruby42a8b752024-05-27 15:37:53 -04004109 const wsi_image_create_info* wsiImageCi =
4110 vk_find_struct<wsi_image_create_info>(pCreateInfo);
4111 if (wsiImageCi) {
4112 if (!wsiImageCi->scanout) {
4113 mesa_logd(
4114 "gfxstream only supports native DRM image scanout path for Linux WSI "
4115 "(wsi_image_create_info::scanout)");
4116 return VK_ERROR_INITIALIZATION_FAILED;
4117 }
4118 // Linux WSI creates swapchain images with VK_IMAGE_CREATE_ALIAS_BIT. Vulkan spec
4119 // states: "If the pNext chain includes a VkExternalMemoryImageCreateInfo or
4120 // VkExternalMemoryImageCreateInfoNV structure whose handleTypes member is not 0, it is
4121 // as if VK_IMAGE_CREATE_ALIAS_BIT is set." To avoid flag mismatches on host driver,
4122 // remove the VK_IMAGE_CREATE_ALIAS_BIT here.
4123 localCreateInfo.flags &= ~VK_IMAGE_CREATE_ALIAS_BIT;
4124 // TODO (b/326956485): DRM format modifiers to support client/compositor awareness
4125 // For now, override WSI images to use linear tiling, as compositor will default to
4126 // DRM_FORMAT_MOD_LINEAR.
4127 localCreateInfo.tiling = VK_IMAGE_TILING_LINEAR;
Gurchetan Singhc4444b82023-09-19 08:06:20 -07004128 }
Aaron Ruby42a8b752024-05-27 15:37:53 -04004129 isDmaBufImage = true;
Gurchetan Singhc4444b82023-09-19 08:06:20 -07004130 }
4131#endif
4132
Lingfeng Yang5c701122019-03-05 08:34:46 -08004133#ifdef VK_USE_PLATFORM_ANDROID_KHR
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004134 VkNativeBufferANDROID localAnb;
4135 const VkNativeBufferANDROID* anbInfoPtr = vk_find_struct<VkNativeBufferANDROID>(pCreateInfo);
4136 if (anbInfoPtr) {
4137 localAnb = vk_make_orphan_copy(*anbInfoPtr);
4138 vk_append_struct(&structChainIter, &localAnb);
4139 }
Roman Kiryanovec610ae2019-04-25 17:11:26 -07004140
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004141 VkExternalFormatANDROID localExtFormatAndroid;
4142 const VkExternalFormatANDROID* extFormatAndroidPtr =
4143 vk_find_struct<VkExternalFormatANDROID>(pCreateInfo);
4144 if (extFormatAndroidPtr) {
4145 localExtFormatAndroid = vk_make_orphan_copy(*extFormatAndroidPtr);
Roman Kiryanovec610ae2019-04-25 17:11:26 -07004146
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004147 // Do not append external format android;
4148 // instead, replace the local image localCreateInfo format
4149 // with the corresponding Vulkan format
4150 if (extFormatAndroidPtr->externalFormat) {
Sergiuad918472024-05-21 16:28:45 +01004151 localCreateInfo.format = vk_format_from_fourcc(extFormatAndroidPtr->externalFormat);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004152 if (localCreateInfo.format == VK_FORMAT_UNDEFINED)
4153 return VK_ERROR_VALIDATION_FAILED_EXT;
Lingfeng Yang5c701122019-03-05 08:34:46 -08004154 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004155 }
Lingfeng Yang5c701122019-03-05 08:34:46 -08004156#endif
4157
David Revemane060bbc2019-03-18 20:52:46 -04004158#ifdef VK_USE_PLATFORM_FUCHSIA
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004159 const VkBufferCollectionImageCreateInfoFUCHSIA* extBufferCollectionPtr =
4160 vk_find_struct<VkBufferCollectionImageCreateInfoFUCHSIA>(pCreateInfo);
Yilong Lie12328f2022-01-06 03:32:13 -08004161
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004162 bool isSysmemBackedMemory = false;
Yilong Lie1a91332020-07-19 17:37:49 -07004163
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004164 if (extImgCiPtr &&
4165 (extImgCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA)) {
4166 isSysmemBackedMemory = true;
4167 }
Yilong Lie1a91332020-07-19 17:37:49 -07004168
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004169 if (extBufferCollectionPtr) {
4170 const auto& collection =
4171 *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
David Revemandf8d9642019-04-24 12:13:36 -04004172 extBufferCollectionPtr->collection);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004173 uint32_t index = extBufferCollectionPtr->index;
4174 zx::vmo vmo;
David Reveman5b7c5842019-02-20 01:06:48 -05004175
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004176 fuchsia_sysmem::wire::BufferCollectionInfo2 info;
Yilong Li6dc9b5b2020-07-17 21:05:22 -07004177
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004178 auto result = collection->WaitForBuffersAllocated();
4179 if (result.ok() && result->status == ZX_OK) {
4180 info = std::move(result->buffer_collection_info);
4181 if (index < info.buffer_count && info.settings.has_image_format_constraints) {
4182 vmo = std::move(info.buffers[index].vmo);
David Revemand5d92d62019-03-29 15:19:25 -04004183 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004184 } else {
Gurchetan Singh42361f72024-05-16 17:37:11 -07004185 mesa_loge("WaitForBuffersAllocated failed: %d %d", result.status(),
4186 GET_STATUS_SAFE(result, status));
David Revemandf8d9642019-04-24 12:13:36 -04004187 }
Yilong Lif610b672021-03-09 15:08:34 -08004188
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004189 if (vmo.is_valid()) {
4190 zx::vmo vmo_dup;
4191 if (zx_status_t status = vmo.duplicate(ZX_RIGHT_SAME_RIGHTS, &vmo_dup);
4192 status != ZX_OK) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07004193 mesa_loge("%s: zx_vmo_duplicate failed: %d", __func__, status);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004194 abort();
4195 }
4196
4197 auto buffer_handle_result = mControlDevice->GetBufferHandle(std::move(vmo_dup));
4198 if (!buffer_handle_result.ok()) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07004199 mesa_loge("%s: GetBufferHandle FIDL error: %d", __func__,
4200 buffer_handle_result.status());
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004201 abort();
4202 }
4203 if (buffer_handle_result.value().res == ZX_OK) {
4204 // Buffer handle already exists.
4205 // If it is a ColorBuffer, no-op; Otherwise return error.
4206 if (buffer_handle_result.value().type !=
4207 fuchsia_hardware_goldfish::wire::BufferHandleType::kColorBuffer) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07004208 mesa_loge("%s: BufferHandle %u is not a ColorBuffer", __func__,
4209 buffer_handle_result.value().id);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004210 return VK_ERROR_OUT_OF_HOST_MEMORY;
4211 }
4212 } else if (buffer_handle_result.value().res == ZX_ERR_NOT_FOUND) {
4213 // Buffer handle not found. Create ColorBuffer based on buffer settings.
4214 auto format = info.settings.image_format_constraints.pixel_format.type ==
4215 fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8
4216 ? fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kRgba
4217 : fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kBgra;
4218
4219 uint32_t memory_property =
4220 info.settings.buffer_settings.heap ==
4221 fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal
4222 ? fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal
4223 : fuchsia_hardware_goldfish::wire::kMemoryPropertyHostVisible;
4224
4225 fidl::Arena arena;
4226 fuchsia_hardware_goldfish::wire::CreateColorBuffer2Params createParams(arena);
4227 createParams.set_width(info.settings.image_format_constraints.min_coded_width)
4228 .set_height(info.settings.image_format_constraints.min_coded_height)
4229 .set_format(format)
4230 .set_memory_property(memory_property);
4231
4232 auto result =
4233 mControlDevice->CreateColorBuffer2(std::move(vmo), std::move(createParams));
4234 if (result.ok() && result->res == ZX_ERR_ALREADY_EXISTS) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07004235 mesa_logd("CreateColorBuffer: color buffer already exists\n");
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004236 } else if (!result.ok() || result->res != ZX_OK) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07004237 mesa_loge("CreateColorBuffer failed: %d:%d", result.status(),
4238 GET_STATUS_SAFE(result, res));
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004239 }
4240 }
4241
4242 if (info.settings.buffer_settings.heap ==
4243 fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07004244 mesa_logd(
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004245 "%s: Image uses host visible memory heap; set tiling "
4246 "to linear to match host ImageCreateInfo",
4247 __func__);
4248 localCreateInfo.tiling = VK_IMAGE_TILING_LINEAR;
4249 }
Yilong Lif610b672021-03-09 15:08:34 -08004250 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004251 isSysmemBackedMemory = true;
4252 }
4253
4254 if (isSysmemBackedMemory) {
4255 localCreateInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
4256 }
David Reveman5b7c5842019-02-20 01:06:48 -05004257#endif
4258
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004259 VkResult res;
4260 VkMemoryRequirements memReqs;
Lingfeng Yang87f11962019-06-27 16:28:53 +00004261
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004262 if (supportsCreateResourcesWithRequirements()) {
4263 res = enc->vkCreateImageWithRequirementsGOOGLE(device, &localCreateInfo, pAllocator, pImage,
4264 &memReqs, true /* do lock */);
4265 } else {
4266 res = enc->vkCreateImage(device, &localCreateInfo, pAllocator, pImage, true /* do lock */);
4267 }
Lingfeng Yang3e87e852019-02-19 14:12:49 -08004268
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004269 if (res != VK_SUCCESS) return res;
Lingfeng Yang3e87e852019-02-19 14:12:49 -08004270
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004271 AutoLock<RecursiveLock> lock(mLock);
Lingfeng Yang3e87e852019-02-19 14:12:49 -08004272
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004273 auto it = info_VkImage.find(*pImage);
4274 if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;
Lingfeng Yang3e87e852019-02-19 14:12:49 -08004275
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004276 auto& info = it->second;
Lingfeng Yang3e87e852019-02-19 14:12:49 -08004277
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004278 info.device = device;
4279 info.createInfo = *pCreateInfo;
4280 info.createInfo.pNext = nullptr;
Lingfeng Yang3e87e852019-02-19 14:12:49 -08004281
Yilong Li90ffa1c2022-06-01 15:10:04 -07004282#ifdef VK_USE_PLATFORM_ANDROID_KHR
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004283 if (extFormatAndroidPtr && extFormatAndroidPtr->externalFormat) {
4284 info.hasExternalFormat = true;
Sergiuad918472024-05-21 16:28:45 +01004285 info.externalFourccFormat = extFormatAndroidPtr->externalFormat;
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004286 }
Yilong Li90ffa1c2022-06-01 15:10:04 -07004287#endif // VK_USE_PLATFORM_ANDROID_KHR
Kaiyi Lia713b412021-09-20 07:03:01 -07004288
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004289 if (supportsCreateResourcesWithRequirements()) {
4290 info.baseRequirementsKnown = true;
4291 }
Lingfeng Yang88a8daf2019-03-01 10:15:55 -08004292
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004293 if (extImgCiPtr) {
4294 info.external = true;
4295 info.externalCreateInfo = *extImgCiPtr;
4296 }
Lingfeng Yang87f11962019-06-27 16:28:53 +00004297
Yilong Lie202d552020-02-06 07:38:16 -08004298#ifdef VK_USE_PLATFORM_FUCHSIA
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004299 if (isSysmemBackedMemory) {
4300 info.isSysmemBackedMemory = true;
4301 }
Yilong Lie202d552020-02-06 07:38:16 -08004302#endif
Gurchetan Singhd56274b2023-04-10 10:34:16 -07004303
// Delete the `protocolVersion` check once goldfish drivers are gone.
Aaron Ruby42a8b752024-05-27 15:37:53 -04004305#if defined(VK_USE_PLATFORM_ANDROID_KHR)
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004306 if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
4307 mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
4308 }
Aaron Ruby42a8b752024-05-27 15:37:53 -04004309 if ((extImgCiPtr && (extImgCiPtr->handleTypes &
Gurchetan Singhc4444b82023-09-19 08:06:20 -07004310 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID))) {
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004311 updateMemoryTypeBits(&memReqs.memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);
4312 }
Jason Macnak66e397b2023-03-15 12:11:22 -07004313#endif
Aaron Ruby42a8b752024-05-27 15:37:53 -04004314#if defined(LINUX_GUEST_BUILD)
4315 if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
4316 mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
4317 }
4318 info.isDmaBufImage = isDmaBufImage;
4319 if (info.isDmaBufImage) {
4320 updateMemoryTypeBits(&memReqs.memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);
Aaron Rubya8140f42024-05-10 15:50:44 -04004321 if (localCreateInfo.tiling == VK_IMAGE_TILING_OPTIMAL) {
4322 // Linux WSI calls vkGetImageSubresourceLayout() to query the stride for swapchain
4323 // support. Similarly, stride is also queried from vkGetImageSubresourceLayout() to
4324 // determine the stride for colorBuffer resource creation (guest-side dmabuf resource).
4325 // To satisfy valid usage of this API, must call on the linearPeerImage for the VkImage
4326 // in question. As long as these two use cases match, the rowPitch won't actually be
4327 // used by WSI.
4328 VkImageCreateInfo linearPeerImageCreateInfo = {
4329 .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
4330 .pNext = nullptr,
4331 .flags = {},
4332 .imageType = VK_IMAGE_TYPE_2D,
4333 .format = localCreateInfo.format,
4334 .extent = localCreateInfo.extent,
4335 .mipLevels = 1,
4336 .arrayLayers = 1,
4337 .samples = VK_SAMPLE_COUNT_1_BIT,
4338 .tiling = VK_IMAGE_TILING_LINEAR,
4339 .usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
4340 .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
4341 .queueFamilyIndexCount = 0,
4342 .pQueueFamilyIndices = nullptr,
4343 .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
4344 };
4345 res = enc->vkCreateImage(device, &linearPeerImageCreateInfo, pAllocator,
4346 &info.linearPeerImage, true /* do lock */);
4347 if (res != VK_SUCCESS) return res;
4348 }
Aaron Ruby42a8b752024-05-27 15:37:53 -04004349 }
4350#endif
Yilong Lie202d552020-02-06 07:38:16 -08004351
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004352 if (info.baseRequirementsKnown) {
4353 transformImageMemoryRequirementsForGuestLocked(*pImage, &memReqs);
4354 info.baseRequirements = memReqs;
Lingfeng Yang4af5f322019-02-14 08:10:28 -08004355 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004356 return res;
4357}
Lingfeng Yang4af5f322019-02-14 08:10:28 -08004358
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004359VkResult ResourceTracker::on_vkCreateSamplerYcbcrConversion(
4360 void* context, VkResult, VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
4361 const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion) {
4362 VkSamplerYcbcrConversionCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
Lingfeng Yang5c701122019-03-05 08:34:46 -08004363
4364#ifdef VK_USE_PLATFORM_ANDROID_KHR
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004365 const VkExternalFormatANDROID* extFormatAndroidPtr =
4366 vk_find_struct<VkExternalFormatANDROID>(pCreateInfo);
4367 if (extFormatAndroidPtr) {
Sergiuad918472024-05-21 16:28:45 +01004368 if (extFormatAndroidPtr->externalFormat == DRM_FORMAT_RGB565) {
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004369 // We don't support external formats on host and it causes RGB565
4370 // to fail in CtsGraphicsTestCases android.graphics.cts.BasicVulkanGpuTest
4371 // when passed as an external format.
4372 // We may consider doing this for all external formats.
4373 // See b/134771579.
4374 *pYcbcrConversion = VK_YCBCR_CONVERSION_DO_NOTHING;
4375 return VK_SUCCESS;
4376 } else if (extFormatAndroidPtr->externalFormat) {
Sergiuad918472024-05-21 16:28:45 +01004377 localCreateInfo.format = vk_format_from_fourcc(extFormatAndroidPtr->externalFormat);
Lingfeng Yang5c701122019-03-05 08:34:46 -08004378 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004379 }
Lingfeng Yang5c701122019-03-05 08:34:46 -08004380#endif
4381
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004382 VkEncoder* enc = (VkEncoder*)context;
4383 VkResult res = enc->vkCreateSamplerYcbcrConversion(device, &localCreateInfo, pAllocator,
4384 pYcbcrConversion, true /* do lock */);
Roman Kiryanov004f48c2019-06-28 14:59:32 -07004385
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004386 if (*pYcbcrConversion == VK_YCBCR_CONVERSION_DO_NOTHING) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07004387 mesa_loge(
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004388 "FATAL: vkCreateSamplerYcbcrConversion returned a reserved value "
4389 "(VK_YCBCR_CONVERSION_DO_NOTHING)");
4390 abort();
Roman Kiryanov004f48c2019-06-28 14:59:32 -07004391 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004392 return res;
4393}
Roman Kiryanov004f48c2019-06-28 14:59:32 -07004394
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004395void ResourceTracker::on_vkDestroySamplerYcbcrConversion(void* context, VkDevice device,
4396 VkSamplerYcbcrConversion ycbcrConversion,
4397 const VkAllocationCallbacks* pAllocator) {
4398 VkEncoder* enc = (VkEncoder*)context;
4399 if (ycbcrConversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
4400 enc->vkDestroySamplerYcbcrConversion(device, ycbcrConversion, pAllocator,
4401 true /* do lock */);
Lingfeng Yang5c701122019-03-05 08:34:46 -08004402 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004403}
Lingfeng Yang5c701122019-03-05 08:34:46 -08004404
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004405VkResult ResourceTracker::on_vkCreateSamplerYcbcrConversionKHR(
4406 void* context, VkResult, VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
4407 const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion) {
4408 VkSamplerYcbcrConversionCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
Lingfeng Yang5c701122019-03-05 08:34:46 -08004409
Gurchetan Singh6c5a79d2022-02-10 11:47:59 -08004410#if defined(VK_USE_PLATFORM_ANDROID_KHR)
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004411 const VkExternalFormatANDROID* extFormatAndroidPtr =
4412 vk_find_struct<VkExternalFormatANDROID>(pCreateInfo);
4413 if (extFormatAndroidPtr) {
Sergiuad918472024-05-21 16:28:45 +01004414 if (extFormatAndroidPtr->externalFormat == DRM_FORMAT_RGB565) {
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004415 // We don't support external formats on host and it causes RGB565
4416 // to fail in CtsGraphicsTestCases android.graphics.cts.BasicVulkanGpuTest
4417 // when passed as an external format.
4418 // We may consider doing this for all external formats.
4419 // See b/134771579.
4420 *pYcbcrConversion = VK_YCBCR_CONVERSION_DO_NOTHING;
4421 return VK_SUCCESS;
4422 } else if (extFormatAndroidPtr->externalFormat) {
Sergiuad918472024-05-21 16:28:45 +01004423 localCreateInfo.format = vk_format_from_fourcc(extFormatAndroidPtr->externalFormat);
Lingfeng Yang5c701122019-03-05 08:34:46 -08004424 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004425 }
Lingfeng Yang5c701122019-03-05 08:34:46 -08004426#endif
4427
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004428 VkEncoder* enc = (VkEncoder*)context;
4429 VkResult res = enc->vkCreateSamplerYcbcrConversionKHR(device, &localCreateInfo, pAllocator,
4430 pYcbcrConversion, true /* do lock */);
Roman Kiryanov004f48c2019-06-28 14:59:32 -07004431
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004432 if (*pYcbcrConversion == VK_YCBCR_CONVERSION_DO_NOTHING) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07004433 mesa_loge(
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004434 "FATAL: vkCreateSamplerYcbcrConversionKHR returned a reserved value "
4435 "(VK_YCBCR_CONVERSION_DO_NOTHING)");
4436 abort();
Roman Kiryanov004f48c2019-06-28 14:59:32 -07004437 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004438 return res;
4439}
Roman Kiryanov004f48c2019-06-28 14:59:32 -07004440
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004441void ResourceTracker::on_vkDestroySamplerYcbcrConversionKHR(
4442 void* context, VkDevice device, VkSamplerYcbcrConversion ycbcrConversion,
4443 const VkAllocationCallbacks* pAllocator) {
4444 VkEncoder* enc = (VkEncoder*)context;
4445 if (ycbcrConversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
4446 enc->vkDestroySamplerYcbcrConversionKHR(device, ycbcrConversion, pAllocator,
4447 true /* do lock */);
Roman Kiryanov004f48c2019-06-28 14:59:32 -07004448 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004449}
Roman Kiryanov004f48c2019-06-28 14:59:32 -07004450
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004451VkResult ResourceTracker::on_vkCreateSampler(void* context, VkResult, VkDevice device,
4452 const VkSamplerCreateInfo* pCreateInfo,
4453 const VkAllocationCallbacks* pAllocator,
4454 VkSampler* pSampler) {
4455 VkSamplerCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
4456 vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
Roman Kiryanov004f48c2019-06-28 14:59:32 -07004457
Yilong Li256f18f2020-06-01 23:41:22 +00004458#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(VK_USE_PLATFORM_FUCHSIA)
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004459 VkSamplerYcbcrConversionInfo localVkSamplerYcbcrConversionInfo;
4460 const VkSamplerYcbcrConversionInfo* samplerYcbcrConversionInfo =
4461 vk_find_struct<VkSamplerYcbcrConversionInfo>(pCreateInfo);
4462 if (samplerYcbcrConversionInfo) {
4463 if (samplerYcbcrConversionInfo->conversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
4464 localVkSamplerYcbcrConversionInfo = vk_make_orphan_copy(*samplerYcbcrConversionInfo);
4465 vk_append_struct(&structChainIter, &localVkSamplerYcbcrConversionInfo);
Roman Kiryanov004f48c2019-06-28 14:59:32 -07004466 }
Lingfeng Yang5c701122019-03-05 08:34:46 -08004467 }
4468
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004469 VkSamplerCustomBorderColorCreateInfoEXT localVkSamplerCustomBorderColorCreateInfo;
4470 const VkSamplerCustomBorderColorCreateInfoEXT* samplerCustomBorderColorCreateInfo =
4471 vk_find_struct<VkSamplerCustomBorderColorCreateInfoEXT>(pCreateInfo);
4472 if (samplerCustomBorderColorCreateInfo) {
4473 localVkSamplerCustomBorderColorCreateInfo =
4474 vk_make_orphan_copy(*samplerCustomBorderColorCreateInfo);
4475 vk_append_struct(&structChainIter, &localVkSamplerCustomBorderColorCreateInfo);
4476 }
4477#endif
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004478
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004479 VkEncoder* enc = (VkEncoder*)context;
4480 return enc->vkCreateSampler(device, &localCreateInfo, pAllocator, pSampler, true /* do lock */);
4481}
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004482
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004483void ResourceTracker::on_vkGetPhysicalDeviceExternalFenceProperties(
4484 void* context, VkPhysicalDevice physicalDevice,
4485 const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
4486 VkExternalFenceProperties* pExternalFenceProperties) {
4487 (void)context;
4488 (void)physicalDevice;
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004489
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004490 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
4491 pExternalFenceProperties->compatibleHandleTypes = 0;
4492 pExternalFenceProperties->externalFenceFeatures = 0;
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004493
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004494 bool syncFd = pExternalFenceInfo->handleType & VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4495
4496 if (!syncFd) {
4497 return;
4498 }
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004499
Gurchetan Singh6c5a79d2022-02-10 11:47:59 -08004500#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004501 pExternalFenceProperties->exportFromImportedHandleTypes =
4502 VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4503 pExternalFenceProperties->compatibleHandleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4504 pExternalFenceProperties->externalFenceFeatures =
4505 VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT | VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT;
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004506#endif
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004507}
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004508
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004509void ResourceTracker::on_vkGetPhysicalDeviceExternalFencePropertiesKHR(
4510 void* context, VkPhysicalDevice physicalDevice,
4511 const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
4512 VkExternalFenceProperties* pExternalFenceProperties) {
4513 on_vkGetPhysicalDeviceExternalFenceProperties(context, physicalDevice, pExternalFenceInfo,
4514 pExternalFenceProperties);
4515}
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004516
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004517VkResult ResourceTracker::on_vkCreateFence(void* context, VkResult input_result, VkDevice device,
4518 const VkFenceCreateInfo* pCreateInfo,
4519 const VkAllocationCallbacks* pAllocator,
4520 VkFence* pFence) {
4521 VkEncoder* enc = (VkEncoder*)context;
4522 VkFenceCreateInfo finalCreateInfo = *pCreateInfo;
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004523
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004524 const VkExportFenceCreateInfo* exportFenceInfoPtr =
4525 vk_find_struct<VkExportFenceCreateInfo>(pCreateInfo);
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004526
Gurchetan Singh6c5a79d2022-02-10 11:47:59 -08004527#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004528 bool exportSyncFd = exportFenceInfoPtr && (exportFenceInfoPtr->handleTypes &
4529 VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT);
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004530#endif
4531
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004532 input_result =
4533 enc->vkCreateFence(device, &finalCreateInfo, pAllocator, pFence, true /* do lock */);
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004534
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004535 if (input_result != VK_SUCCESS) return input_result;
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004536
Gurchetan Singh6c5a79d2022-02-10 11:47:59 -08004537#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004538 if (exportSyncFd) {
4539 if (!mFeatureInfo->hasVirtioGpuNativeSync) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07004540 mesa_logi("%s: ensure sync device\n", __func__);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004541 ensureSyncDeviceFd();
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004542 }
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004543
Gurchetan Singh42361f72024-05-16 17:37:11 -07004544 mesa_logi("%s: getting fence info\n", __func__);
Gurchetan Singh6c906de2021-10-21 17:09:00 -07004545 AutoLock<RecursiveLock> lock(mLock);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004546 auto it = info_VkFence.find(*pFence);
4547
4548 if (it == info_VkFence.end()) return VK_ERROR_INITIALIZATION_FAILED;
4549
4550 auto& info = it->second;
4551
4552 info.external = true;
4553 info.exportFenceCreateInfo = *exportFenceInfoPtr;
Gurchetan Singh42361f72024-05-16 17:37:11 -07004554 mesa_logi("%s: info set (fence still -1). fence: %p\n", __func__, (void*)(*pFence));
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004555 // syncFd is still -1 because we expect user to explicitly
4556 // export it via vkGetFenceFdKHR
4557 }
4558#endif
4559
4560 return input_result;
4561}
4562
4563void ResourceTracker::on_vkDestroyFence(void* context, VkDevice device, VkFence fence,
4564 const VkAllocationCallbacks* pAllocator) {
4565 VkEncoder* enc = (VkEncoder*)context;
4566 enc->vkDestroyFence(device, fence, pAllocator, true /* do lock */);
4567}
4568
4569VkResult ResourceTracker::on_vkResetFences(void* context, VkResult, VkDevice device,
4570 uint32_t fenceCount, const VkFence* pFences) {
4571 VkEncoder* enc = (VkEncoder*)context;
4572 VkResult res = enc->vkResetFences(device, fenceCount, pFences, true /* do lock */);
4573
4574 if (res != VK_SUCCESS) return res;
4575
4576 if (!fenceCount) return res;
4577
4578 // Permanence: temporary
4579 // on fence reset, close the fence fd
4580 // and act like we need to GetFenceFdKHR/ImportFenceFdKHR again
4581 AutoLock<RecursiveLock> lock(mLock);
4582 for (uint32_t i = 0; i < fenceCount; ++i) {
4583 VkFence fence = pFences[i];
4584 auto it = info_VkFence.find(fence);
4585 auto& info = it->second;
4586 if (!info.external) continue;
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004587
Gurchetan Singhb7feebd2024-01-23 14:12:36 -08004588#if GFXSTREAM_ENABLE_GUEST_GOLDFISH
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004589 if (info.syncFd >= 0) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07004590 mesa_logi("%s: resetting fence. make fd -1\n", __func__);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004591 goldfish_sync_signal(info.syncFd);
4592 auto* syncHelper =
4593 ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
4594 syncHelper->close(info.syncFd);
4595 info.syncFd = -1;
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004596 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004597#endif
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004598 }
4599
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004600 return res;
4601}
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004602
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004603VkResult ResourceTracker::on_vkImportFenceFdKHR(void* context, VkResult, VkDevice device,
4604 const VkImportFenceFdInfoKHR* pImportFenceFdInfo) {
4605 (void)context;
4606 (void)device;
4607 (void)pImportFenceFdInfo;
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004608
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004609 // Transference: copy
4610 // meaning dup() the incoming fd
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004611
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004612 VkEncoder* enc = (VkEncoder*)context;
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004613
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004614 bool hasFence = pImportFenceFdInfo->fence != VK_NULL_HANDLE;
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004615
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004616 if (!hasFence) return VK_ERROR_OUT_OF_HOST_MEMORY;
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004617
Gurchetan Singh6c5a79d2022-02-10 11:47:59 -08004618#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004619
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004620 bool syncFdImport = pImportFenceFdInfo->handleType & VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004621
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004622 if (!syncFdImport) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07004623 mesa_logi("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd import\n", __func__);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004624 return VK_ERROR_OUT_OF_HOST_MEMORY;
4625 }
Lingfeng Yangcd2d8fe2019-08-16 12:21:50 -07004626
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004627 AutoLock<RecursiveLock> lock(mLock);
4628 auto it = info_VkFence.find(pImportFenceFdInfo->fence);
4629 if (it == info_VkFence.end()) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07004630 mesa_logi("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence info\n", __func__);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004631 return VK_ERROR_OUT_OF_HOST_MEMORY;
4632 }
4633
4634 auto& info = it->second;
4635
4636 auto* syncHelper = ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
Gurchetan Singhb7feebd2024-01-23 14:12:36 -08004637#if GFXSTREAM_ENABLE_GUEST_GOLDFISH
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004638 if (info.syncFd >= 0) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07004639 mesa_logi("%s: previous sync fd exists, close it\n", __func__);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004640 goldfish_sync_signal(info.syncFd);
4641 syncHelper->close(info.syncFd);
4642 }
Gurchetan Singhb7feebd2024-01-23 14:12:36 -08004643#endif
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004644
4645 if (pImportFenceFdInfo->fd < 0) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07004646 mesa_logi("%s: import -1, set to -1 and exit\n", __func__);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004647 info.syncFd = -1;
4648 } else {
Gurchetan Singh42361f72024-05-16 17:37:11 -07004649 mesa_logi("%s: import actual fd, dup and close()\n", __func__);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004650 info.syncFd = syncHelper->dup(pImportFenceFdInfo->fd);
4651 syncHelper->close(pImportFenceFdInfo->fd);
4652 }
4653 return VK_SUCCESS;
4654#else
4655 return VK_ERROR_OUT_OF_HOST_MEMORY;
4656#endif
4657}
4658
// Exports a sync fd from a VkFence (vkGetFenceFdKHR), Android/Linux only.
// The fence must have been created exportable with
// VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT (see on_vkCreateFence). The fd is
// produced either via virtio-gpu native sync or the goldfish sync device.
VkResult ResourceTracker::on_vkGetFenceFdKHR(void* context, VkResult, VkDevice device,
                                             const VkFenceGetFdInfoKHR* pGetFdInfo, int* pFd) {
    // export operation.
    // first check if fence is signaled
    // then if so, return -1
    // else, queue work

    VkEncoder* enc = (VkEncoder*)context;

    bool hasFence = pGetFdInfo->fence != VK_NULL_HANDLE;

    if (!hasFence) {
        mesa_logi("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence\n", __func__);
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    // Only sync fd export is supported.
    bool syncFdExport = pGetFdInfo->handleType & VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;

    if (!syncFdExport) {
        mesa_logi("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd fence\n", __func__);
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    // Query the host for the fence's current status before exporting.
    VkResult currentFenceStatus =
        enc->vkGetFenceStatus(device, pGetFdInfo->fence, true /* do lock */);

    if (VK_ERROR_DEVICE_LOST == currentFenceStatus) {  // Other error
        mesa_logi("%s: VK_ERROR_DEVICE_LOST: Other error\n", __func__);
        *pFd = -1;
        return VK_ERROR_DEVICE_LOST;
    }

    if (VK_NOT_READY == currentFenceStatus || VK_SUCCESS == currentFenceStatus) {
        // Fence is valid. We also create a new sync fd for a signaled
        // fence, because ANGLE will use the returned fd directly to
        // implement eglDupNativeFenceFDANDROID, where -1 is only returned
        // when error occurs.
        AutoLock<RecursiveLock> lock(mLock);

        auto it = info_VkFence.find(pGetFdInfo->fence);
        if (it == info_VkFence.end()) {
            mesa_logi("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence info\n", __func__);
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }

        auto& info = it->second;

        // The fence must have been created with sync-fd export enabled.
        bool syncFdCreated = info.external && (info.exportFenceCreateInfo.handleTypes &
                                               VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT);

        if (!syncFdCreated) {
            mesa_logi("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd created\n", __func__);
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }

        if (mFeatureInfo->hasVirtioGpuNativeSync) {
            // Native sync path: ask the host for an OS handle tied to the
            // host-side fence handle.
            // NOTE(review): osHandle is int64_t narrowed into int *pFd —
            // presumably always a valid fd-sized value; confirm.
            VkResult result;
            int64_t osHandle;
            uint64_t hostFenceHandle = get_host_u64_VkFence(pGetFdInfo->fence);

            result = createFence(device, hostFenceHandle, osHandle);
            if (result != VK_SUCCESS) return result;

            *pFd = osHandle;
        } else {
            // Goldfish path: queue work on the sync device, which produces
            // the fd for us.
#if GFXSTREAM_ENABLE_GUEST_GOLDFISH
            goldfish_sync_queue_work(
                mSyncDeviceFd, get_host_u64_VkFence(pGetFdInfo->fence) /* the handle */,
                GOLDFISH_SYNC_VULKAN_SEMAPHORE_SYNC /* thread handle (doubling as type field) */,
                pFd);
#endif
        }

        // relinquish ownership
        info.syncFd = -1;
        mesa_logi("%s: got fd: %d\n", __func__, *pFd);
        return VK_SUCCESS;
    }
    return VK_ERROR_DEVICE_LOST;
#else
    return VK_ERROR_OUT_OF_HOST_MEMORY;
#endif
}
4743
// Waits on a mix of "external" fences (those that have exported a sync fd via
// vkGetFenceFdKHR; see info.syncFd) and plain host-backed fences.
// External fences are waited on guest-side via their sync fds; host-backed
// fences are forwarded to the host driver. When both kinds are present, the
// waits are scheduled as tasks on mWorkPool and joined with waitAll/waitAny
// according to the caller's `waitAll` flag.
VkResult ResourceTracker::on_vkWaitForFences(void* context, VkResult, VkDevice device,
                                             uint32_t fenceCount, const VkFence* pFences,
                                             VkBool32 waitAll, uint64_t timeout) {
    VkEncoder* enc = (VkEncoder*)context;

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    // Partition the incoming fences into sync-fd-backed ("external") and
    // host-only ("non-external") sets, under the tracker lock.
    std::vector<VkFence> fencesExternal;
    std::vector<int> fencesExternalWaitFds;
    std::vector<VkFence> fencesNonExternal;

    AutoLock<RecursiveLock> lock(mLock);

    for (uint32_t i = 0; i < fenceCount; ++i) {
        auto it = info_VkFence.find(pFences[i]);
        if (it == info_VkFence.end()) continue;
        const auto& info = it->second;
        if (info.syncFd >= 0) {
            fencesExternal.push_back(pFences[i]);
            fencesExternalWaitFds.push_back(info.syncFd);
        } else {
            fencesNonExternal.push_back(pFences[i]);
        }
    }

    // Drop the lock before any blocking wait.
    lock.unlock();

    if (fencesExternal.empty()) {
        // No need for work pool, just wait with host driver.
        return enc->vkWaitForFences(device, fenceCount, pFences, waitAll, timeout,
                                    true /* do lock */);
    } else {
        // Depending on wait any or wait all,
        // schedule a wait group with waitAny/waitAll
        std::vector<WorkPool::Task> tasks;

        mesa_logi("%s: scheduling ext waits\n", __func__);

        for (auto fd : fencesExternalWaitFds) {
            mesa_logi("%s: wait on %d\n", __func__, fd);
            // NOTE(review): the per-fd wait uses a fixed 3000 ms budget rather
            // than the caller's `timeout` — presumably intentional as a safety
            // cap; the overall group wait below does honor `timeout`. Confirm.
            tasks.push_back([fd] {
                auto* syncHelper =
                    ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
                syncHelper->wait(fd, 3000);
                mesa_logi("done waiting on fd %d\n", fd);
            });
        }

        if (!fencesNonExternal.empty()) {
            // One additional task forwards all host-backed fences to the host
            // in a single vkWaitForFences call, on a fresh encoder for this
            // worker thread.
            tasks.push_back(
                [this, fencesNonExternal /* copy of vector */, device, waitAll, timeout] {
                    auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
                    auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
                    mesa_logi("%s: vkWaitForFences to host\n", __func__);
                    vkEncoder->vkWaitForFences(device, fencesNonExternal.size(),
                                               fencesNonExternal.data(), waitAll, timeout,
                                               true /* do lock */);
                });
        }

        auto waitGroupHandle = mWorkPool.schedule(tasks);

        // Convert timeout to microseconds from nanoseconds
        bool waitRes = false;
        if (waitAll) {
            waitRes = mWorkPool.waitAll(waitGroupHandle, timeout / 1000);
        } else {
            waitRes = mWorkPool.waitAny(waitGroupHandle, timeout / 1000);
        }

        if (waitRes) {
            mesa_logi("%s: VK_SUCCESS\n", __func__);
            return VK_SUCCESS;
        } else {
            mesa_logi("%s: VK_TIMEOUT\n", __func__);
            return VK_TIMEOUT;
        }
    }
#else
    // Platforms without sync fd support: pure pass-through to the host.
    return enc->vkWaitForFences(device, fenceCount, pFences, waitAll, timeout, true /* do lock */);
#endif
}
4825
4826VkResult ResourceTracker::on_vkCreateDescriptorPool(void* context, VkResult, VkDevice device,
4827 const VkDescriptorPoolCreateInfo* pCreateInfo,
4828 const VkAllocationCallbacks* pAllocator,
4829 VkDescriptorPool* pDescriptorPool) {
4830 VkEncoder* enc = (VkEncoder*)context;
4831
4832 VkResult res = enc->vkCreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool,
4833 true /* do lock */);
4834
4835 if (res != VK_SUCCESS) return res;
4836
4837 VkDescriptorPool pool = *pDescriptorPool;
4838
4839 struct goldfish_VkDescriptorPool* dp = as_goldfish_VkDescriptorPool(pool);
4840 dp->allocInfo = new DescriptorPoolAllocationInfo;
4841 dp->allocInfo->device = device;
4842 dp->allocInfo->createFlags = pCreateInfo->flags;
4843 dp->allocInfo->maxSets = pCreateInfo->maxSets;
4844 dp->allocInfo->usedSets = 0;
4845
4846 for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; ++i) {
4847 dp->allocInfo->descriptorCountInfo.push_back({
4848 pCreateInfo->pPoolSizes[i].type, pCreateInfo->pPoolSizes[i].descriptorCount,
4849 0, /* used */
4850 });
Lingfeng Yange9e77d52020-03-25 14:01:58 -07004851 }
4852
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004853 if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
4854 std::vector<uint64_t> poolIds(pCreateInfo->maxSets);
Lingfeng Yang03354c72020-03-26 13:00:51 -07004855
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004856 uint32_t count = pCreateInfo->maxSets;
4857 enc->vkCollectDescriptorPoolIdsGOOGLE(device, pool, &count, poolIds.data(),
4858 true /* do lock */);
Lingfeng Yang03354c72020-03-26 13:00:51 -07004859
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004860 dp->allocInfo->freePoolIds = poolIds;
Lingfeng Yang03354c72020-03-26 13:00:51 -07004861 }
4862
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004863 return res;
4864}
Lingfeng Yang03354c72020-03-26 13:00:51 -07004865
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004866void ResourceTracker::on_vkDestroyDescriptorPool(void* context, VkDevice device,
4867 VkDescriptorPool descriptorPool,
4868 const VkAllocationCallbacks* pAllocator) {
4869 if (!descriptorPool) return;
Lingfeng Yang03354c72020-03-26 13:00:51 -07004870
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004871 VkEncoder* enc = (VkEncoder*)context;
Lingfeng Yang03354c72020-03-26 13:00:51 -07004872
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004873 clearDescriptorPoolAndUnregisterDescriptorSets(context, device, descriptorPool);
Lingfeng Yangffb94af2021-04-22 15:16:35 -07004874
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004875 enc->vkDestroyDescriptorPool(device, descriptorPool, pAllocator, true /* do lock */);
4876}
Lingfeng Yangffb94af2021-04-22 15:16:35 -07004877
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004878VkResult ResourceTracker::on_vkResetDescriptorPool(void* context, VkResult, VkDevice device,
4879 VkDescriptorPool descriptorPool,
4880 VkDescriptorPoolResetFlags flags) {
4881 if (!descriptorPool) return VK_ERROR_INITIALIZATION_FAILED;
4882
4883 VkEncoder* enc = (VkEncoder*)context;
4884
4885 VkResult res = enc->vkResetDescriptorPool(device, descriptorPool, flags, true /* do lock */);
4886
4887 if (res != VK_SUCCESS) return res;
4888
4889 clearDescriptorPoolAndUnregisterDescriptorSets(context, device, descriptorPool);
4890 return res;
4891}
4892
4893VkResult ResourceTracker::on_vkAllocateDescriptorSets(
4894 void* context, VkResult, VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo,
4895 VkDescriptorSet* pDescriptorSets) {
4896 VkEncoder* enc = (VkEncoder*)context;
4897 auto ci = pAllocateInfo;
4898 auto sets = pDescriptorSets;
4899 if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
4900 // Using the pool ID's we collected earlier from the host
4901 VkResult poolAllocResult = validateAndApplyVirtualDescriptorSetAllocation(ci, sets);
4902
4903 if (poolAllocResult != VK_SUCCESS) return poolAllocResult;
4904
4905 for (uint32_t i = 0; i < ci->descriptorSetCount; ++i) {
4906 register_VkDescriptorSet(sets[i]);
4907 VkDescriptorSetLayout setLayout =
4908 as_goldfish_VkDescriptorSet(sets[i])->reified->setLayout;
4909
4910 // Need to add ref to the set layout in the virtual case
4911 // because the set itself might not be realized on host at the
4912 // same time
4913 struct goldfish_VkDescriptorSetLayout* dsl =
4914 as_goldfish_VkDescriptorSetLayout(setLayout);
4915 ++dsl->layoutInfo->refcount;
4916 }
4917 } else {
4918 VkResult allocRes = enc->vkAllocateDescriptorSets(device, ci, sets, true /* do lock */);
4919
4920 if (allocRes != VK_SUCCESS) return allocRes;
4921
4922 for (uint32_t i = 0; i < ci->descriptorSetCount; ++i) {
4923 applyDescriptorSetAllocation(ci->descriptorPool, ci->pSetLayouts[i]);
4924 fillDescriptorSetInfoForPool(ci->descriptorPool, ci->pSetLayouts[i], sets[i]);
4925 }
4926 }
4927
4928 return VK_SUCCESS;
4929}
4930
4931VkResult ResourceTracker::on_vkFreeDescriptorSets(void* context, VkResult, VkDevice device,
4932 VkDescriptorPool descriptorPool,
4933 uint32_t descriptorSetCount,
4934 const VkDescriptorSet* pDescriptorSets) {
4935 VkEncoder* enc = (VkEncoder*)context;
4936
4937 // Bit of robustness so that we can double free descriptor sets
4938 // and do other invalid usages
4939 // https://github.com/KhronosGroup/Vulkan-Docs/issues/1070
4940 // (people expect VK_SUCCESS to always be returned by vkFreeDescriptorSets)
4941 std::vector<VkDescriptorSet> toActuallyFree;
4942 {
4943 AutoLock<RecursiveLock> lock(mLock);
4944
4945 // Pool was destroyed
4946 if (info_VkDescriptorPool.find(descriptorPool) == info_VkDescriptorPool.end()) {
4947 return VK_SUCCESS;
Lingfeng Yangffb94af2021-04-22 15:16:35 -07004948 }
4949
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004950 if (!descriptorPoolSupportsIndividualFreeLocked(descriptorPool)) return VK_SUCCESS;
Lingfeng Yangffb94af2021-04-22 15:16:35 -07004951
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004952 std::vector<VkDescriptorSet> existingDescriptorSets;
4953 ;
4954
4955 // Check if this descriptor set was in the pool's set of allocated descriptor sets,
4956 // to guard against double free (Double free is allowed by the client)
4957 {
4958 auto allocedSets = as_goldfish_VkDescriptorPool(descriptorPool)->allocInfo->allocedSets;
4959
4960 for (uint32_t i = 0; i < descriptorSetCount; ++i) {
4961 if (allocedSets.end() == allocedSets.find(pDescriptorSets[i])) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07004962 mesa_logi(
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07004963 "%s: Warning: descriptor set %p not found in pool. Was this "
4964 "double-freed?\n",
4965 __func__, (void*)pDescriptorSets[i]);
4966 continue;
4967 }
4968
4969 auto it = info_VkDescriptorSet.find(pDescriptorSets[i]);
4970 if (it == info_VkDescriptorSet.end()) continue;
4971
4972 existingDescriptorSets.push_back(pDescriptorSets[i]);
4973 }
4974 }
4975
4976 for (auto set : existingDescriptorSets) {
4977 if (removeDescriptorSetFromPool(set,
4978 mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate)) {
4979 toActuallyFree.push_back(set);
4980 }
4981 }
4982
4983 if (toActuallyFree.empty()) return VK_SUCCESS;
4984 }
4985
4986 if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
4987 // In the batched set update case, decrement refcount on the set layout
4988 // and only free on host if we satisfied a pending allocation on the
4989 // host.
4990 for (uint32_t i = 0; i < toActuallyFree.size(); ++i) {
4991 VkDescriptorSetLayout setLayout =
4992 as_goldfish_VkDescriptorSet(toActuallyFree[i])->reified->setLayout;
4993 decDescriptorSetLayoutRef(context, device, setLayout, nullptr);
4994 }
4995 freeDescriptorSetsIfHostAllocated(enc, device, (uint32_t)toActuallyFree.size(),
4996 toActuallyFree.data());
4997 } else {
4998 // In the non-batched set update case, just free them directly.
4999 enc->vkFreeDescriptorSets(device, descriptorPool, (uint32_t)toActuallyFree.size(),
5000 toActuallyFree.data(), true /* do lock */);
5001 }
5002 return VK_SUCCESS;
5003}
5004
5005VkResult ResourceTracker::on_vkCreateDescriptorSetLayout(
5006 void* context, VkResult, VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
5007 const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout) {
5008 VkEncoder* enc = (VkEncoder*)context;
5009
5010 VkResult res = enc->vkCreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout,
5011 true /* do lock */);
5012
5013 if (res != VK_SUCCESS) return res;
5014
5015 struct goldfish_VkDescriptorSetLayout* dsl = as_goldfish_VkDescriptorSetLayout(*pSetLayout);
5016 dsl->layoutInfo = new DescriptorSetLayoutInfo;
5017 for (uint32_t i = 0; i < pCreateInfo->bindingCount; ++i) {
5018 dsl->layoutInfo->bindings.push_back(pCreateInfo->pBindings[i]);
5019 }
5020 dsl->layoutInfo->refcount = 1;
5021
5022 return res;
5023}
5024
5025void ResourceTracker::on_vkUpdateDescriptorSets(void* context, VkDevice device,
5026 uint32_t descriptorWriteCount,
5027 const VkWriteDescriptorSet* pDescriptorWrites,
5028 uint32_t descriptorCopyCount,
5029 const VkCopyDescriptorSet* pDescriptorCopies) {
5030 VkEncoder* enc = (VkEncoder*)context;
5031
5032 std::vector<VkDescriptorImageInfo> transformedImageInfos;
5033 std::vector<VkWriteDescriptorSet> transformedWrites(descriptorWriteCount);
5034
5035 memcpy(transformedWrites.data(), pDescriptorWrites,
5036 sizeof(VkWriteDescriptorSet) * descriptorWriteCount);
5037
5038 size_t imageInfosNeeded = 0;
5039 for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
5040 if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
5041 if (!transformedWrites[i].pImageInfo) continue;
5042
5043 imageInfosNeeded += transformedWrites[i].descriptorCount;
5044 }
5045
5046 transformedImageInfos.resize(imageInfosNeeded);
5047
5048 size_t imageInfoIndex = 0;
5049 for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
5050 if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
5051 if (!transformedWrites[i].pImageInfo) continue;
5052
5053 for (uint32_t j = 0; j < transformedWrites[i].descriptorCount; ++j) {
5054 transformedImageInfos[imageInfoIndex] = transformedWrites[i].pImageInfo[j];
5055 ++imageInfoIndex;
5056 }
5057 transformedWrites[i].pImageInfo =
5058 &transformedImageInfos[imageInfoIndex - transformedWrites[i].descriptorCount];
5059 }
5060
5061 {
5062 // Validate and filter samplers
5063 AutoLock<RecursiveLock> lock(mLock);
Lingfeng Yangffb94af2021-04-22 15:16:35 -07005064 size_t imageInfoIndex = 0;
5065 for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
5066 if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
5067 if (!transformedWrites[i].pImageInfo) continue;
5068
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005069 bool isImmutableSampler = descriptorBindingIsImmutableSampler(
5070 transformedWrites[i].dstSet, transformedWrites[i].dstBinding);
5071
Lingfeng Yangffb94af2021-04-22 15:16:35 -07005072 for (uint32_t j = 0; j < transformedWrites[i].descriptorCount; ++j) {
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005073 if (isImmutableSampler) {
5074 transformedImageInfos[imageInfoIndex].sampler = 0;
5075 }
5076 transformedImageInfos[imageInfoIndex] =
5077 filterNonexistentSampler(transformedImageInfos[imageInfoIndex]);
Lingfeng Yangffb94af2021-04-22 15:16:35 -07005078 ++imageInfoIndex;
5079 }
Lingfeng Yangfaa2d452021-02-17 14:15:07 -08005080 }
Lingfeng Yang03354c72020-03-26 13:00:51 -07005081 }
5082
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005083 if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
5084 for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
5085 VkDescriptorSet set = transformedWrites[i].dstSet;
5086 doEmulatedDescriptorWrite(&transformedWrites[i],
5087 as_goldfish_VkDescriptorSet(set)->reified);
5088 }
Jason Macnak119ec5d2022-06-23 16:18:33 -07005089
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005090 for (uint32_t i = 0; i < descriptorCopyCount; ++i) {
5091 doEmulatedDescriptorCopy(
5092 &pDescriptorCopies[i],
5093 as_goldfish_VkDescriptorSet(pDescriptorCopies[i].srcSet)->reified,
5094 as_goldfish_VkDescriptorSet(pDescriptorCopies[i].dstSet)->reified);
5095 }
5096 } else {
5097 enc->vkUpdateDescriptorSets(device, descriptorWriteCount, transformedWrites.data(),
5098 descriptorCopyCount, pDescriptorCopies, true /* do lock */);
5099 }
5100}
5101
5102void ResourceTracker::on_vkDestroyImage(void* context, VkDevice device, VkImage image,
5103 const VkAllocationCallbacks* pAllocator) {
Jason Macnak119ec5d2022-06-23 16:18:33 -07005104#ifdef VK_USE_PLATFORM_ANDROID_KHR
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005105 auto* syncHelper = ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
5106 {
5107 AutoLock<RecursiveLock> lock(mLock); // do not guard encoder may cause
5108 // deadlock b/243339973
Jason Macnak119ec5d2022-06-23 16:18:33 -07005109
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005110 // Wait for any pending QSRIs to prevent a race between the Gfxstream host
5111 // potentially processing the below `vkDestroyImage()` from the VK encoder
5112 // command stream before processing a previously submitted
5113 // `VIRTIO_GPU_NATIVE_SYNC_VULKAN_QSRI_EXPORT` from the virtio-gpu command
5114 // stream which relies on the image existing.
5115 auto imageInfoIt = info_VkImage.find(image);
5116 if (imageInfoIt != info_VkImage.end()) {
Jason Macnak119ec5d2022-06-23 16:18:33 -07005117 auto& imageInfo = imageInfoIt->second;
5118 for (int syncFd : imageInfo.pendingQsriSyncFds) {
Jason Macnaka194bbf2023-07-20 10:21:13 -07005119 int syncWaitRet = syncHelper->wait(syncFd, 3000);
Jason Macnak119ec5d2022-06-23 16:18:33 -07005120 if (syncWaitRet < 0) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07005121 mesa_loge("%s: Failed to wait for pending QSRI sync: sterror: %s errno: %d",
5122 __func__, strerror(errno), errno);
Jason Macnak119ec5d2022-06-23 16:18:33 -07005123 }
Jason Macnaka194bbf2023-07-20 10:21:13 -07005124 syncHelper->close(syncFd);
Jason Macnak119ec5d2022-06-23 16:18:33 -07005125 }
5126 imageInfo.pendingQsriSyncFds.clear();
5127 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005128 }
Jason Macnak119ec5d2022-06-23 16:18:33 -07005129#endif
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005130 VkEncoder* enc = (VkEncoder*)context;
Aaron Rubya8140f42024-05-10 15:50:44 -04005131#if defined(LINUX_GUEST_BUILD)
5132 auto imageInfoIt = info_VkImage.find(image);
5133 if (imageInfoIt != info_VkImage.end()) {
5134 auto& imageInfo = imageInfoIt->second;
5135 if (imageInfo.linearPeerImage) {
5136 enc->vkDestroyImage(device, imageInfo.linearPeerImage, pAllocator, true /* do lock */);
5137 }
5138 }
5139#endif
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005140 enc->vkDestroyImage(device, image, pAllocator, true /* do lock */);
5141}
5142
// Returns this image's memory requirements, serving from the guest-side cache
// when available; otherwise queries the host, rewrites the result into its
// guest-visible form, and caches it.
void ResourceTracker::on_vkGetImageMemoryRequirements(void* context, VkDevice device, VkImage image,
                                                      VkMemoryRequirements* pMemoryRequirements) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkImage.find(image);
    if (it == info_VkImage.end()) return;

    auto& info = it->second;

    // Fast path: requirements were already computed (e.g. at image creation
    // via the *WithRequirementsGOOGLE path, or by a previous call).
    if (info.baseRequirementsKnown) {
        *pMemoryRequirements = info.baseRequirements;
        return;
    }

    // Release the lock across the host round trip.
    // NOTE(review): `info` continues to reference the map entry while the lock
    // is dropped; this assumes no other thread erases this image's entry
    // concurrently — TODO confirm.
    lock.unlock();

    VkEncoder* enc = (VkEncoder*)context;

    enc->vkGetImageMemoryRequirements(device, image, pMemoryRequirements, true /* do lock */);

    lock.lock();

    // Rewrite host-side memory type bits into their guest-visible form before
    // caching and returning.
    transformImageMemoryRequirementsForGuestLocked(image, pMemoryRequirements);

    info.baseRequirementsKnown = true;
    info.baseRequirements = *pMemoryRequirements;
}
5170
5171void ResourceTracker::on_vkGetImageMemoryRequirements2(void* context, VkDevice device,
5172 const VkImageMemoryRequirementsInfo2* pInfo,
5173 VkMemoryRequirements2* pMemoryRequirements) {
5174 VkEncoder* enc = (VkEncoder*)context;
5175 enc->vkGetImageMemoryRequirements2(device, pInfo, pMemoryRequirements, true /* do lock */);
5176 transformImageMemoryRequirements2ForGuest(pInfo->image, pMemoryRequirements);
5177}
5178
5179void ResourceTracker::on_vkGetImageMemoryRequirements2KHR(
5180 void* context, VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo,
5181 VkMemoryRequirements2* pMemoryRequirements) {
5182 VkEncoder* enc = (VkEncoder*)context;
5183 enc->vkGetImageMemoryRequirements2KHR(device, pInfo, pMemoryRequirements, true /* do lock */);
5184 transformImageMemoryRequirements2ForGuest(pInfo->image, pMemoryRequirements);
5185}
5186
Aaron Rubya8140f42024-05-10 15:50:44 -04005187void ResourceTracker::on_vkGetImageSubresourceLayout(void* context, VkDevice device, VkImage image,
5188 const VkImageSubresource* pSubresource,
5189 VkSubresourceLayout* pLayout) {
5190 VkEncoder* enc = (VkEncoder*)context;
5191 VkImage targetImage = image;
5192#if defined(LINUX_GUEST_BUILD)
5193 auto it = info_VkImage.find(image);
5194 if (it == info_VkImage.end()) return;
5195 const auto& info = it->second;
5196 if (info.linearPeerImage) {
5197 targetImage = info.linearPeerImage;
5198 }
5199#endif
5200 enc->vkGetImageSubresourceLayout(device, targetImage, pSubresource, pLayout,
5201 true /* do lock */);
5202}
5203
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005204VkResult ResourceTracker::on_vkBindImageMemory(void* context, VkResult, VkDevice device,
5205 VkImage image, VkDeviceMemory memory,
5206 VkDeviceSize memoryOffset) {
5207 VkEncoder* enc = (VkEncoder*)context;
5208 // Do not forward calls with invalid handles to host.
5209 if (info_VkDeviceMemory.find(memory) == info_VkDeviceMemory.end() ||
5210 info_VkImage.find(image) == info_VkImage.end()) {
5211 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5212 }
5213 return enc->vkBindImageMemory(device, image, memory, memoryOffset, true /* do lock */);
5214}
5215
5216VkResult ResourceTracker::on_vkBindImageMemory2(void* context, VkResult, VkDevice device,
5217 uint32_t bindingCount,
5218 const VkBindImageMemoryInfo* pBindInfos) {
5219 VkEncoder* enc = (VkEncoder*)context;
5220
5221 if (bindingCount < 1 || !pBindInfos) {
5222 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
Yilong Lie202d552020-02-06 07:38:16 -08005223 }
5224
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005225 for (uint32_t i = 0; i < bindingCount; i++) {
5226 const VkBindImageMemoryInfo& bimi = pBindInfos[i];
Lingfeng Yang87f11962019-06-27 16:28:53 +00005227
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005228 auto imageIt = info_VkImage.find(bimi.image);
5229 if (imageIt == info_VkImage.end()) {
Yilong Lie1352212022-05-19 16:26:11 -07005230 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5231 }
Jason Macnake0ac2882023-06-07 09:13:51 -07005232
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005233 if (bimi.memory != VK_NULL_HANDLE) {
5234 auto memoryIt = info_VkDeviceMemory.find(bimi.memory);
5235 if (memoryIt == info_VkDeviceMemory.end()) {
Jason Macnake0ac2882023-06-07 09:13:51 -07005236 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5237 }
Jason Macnake0ac2882023-06-07 09:13:51 -07005238 }
Lingfeng Yang4af5f322019-02-14 08:10:28 -08005239 }
5240
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005241 return enc->vkBindImageMemory2(device, bindingCount, pBindInfos, true /* do lock */);
5242}
5243
5244VkResult ResourceTracker::on_vkBindImageMemory2KHR(void* context, VkResult result, VkDevice device,
5245 uint32_t bindingCount,
5246 const VkBindImageMemoryInfo* pBindInfos) {
5247 return on_vkBindImageMemory2(context, result, device, bindingCount, pBindInfos);
5248}
5249
5250VkResult ResourceTracker::on_vkCreateBuffer(void* context, VkResult, VkDevice device,
5251 const VkBufferCreateInfo* pCreateInfo,
5252 const VkAllocationCallbacks* pAllocator,
5253 VkBuffer* pBuffer) {
5254 VkEncoder* enc = (VkEncoder*)context;
5255
5256 VkBufferCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
5257 vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
5258 VkExternalMemoryBufferCreateInfo localExtBufCi;
5259
5260 const VkExternalMemoryBufferCreateInfo* extBufCiPtr =
5261 vk_find_struct<VkExternalMemoryBufferCreateInfo>(pCreateInfo);
5262 if (extBufCiPtr) {
5263 localExtBufCi = vk_make_orphan_copy(*extBufCiPtr);
5264 vk_append_struct(&structChainIter, &localExtBufCi);
Lingfeng Yang4af5f322019-02-14 08:10:28 -08005265 }
5266
Yahan Zhou8a4da212023-10-06 11:02:04 -07005267 VkBufferOpaqueCaptureAddressCreateInfo localCapAddrCi;
5268 const VkBufferOpaqueCaptureAddressCreateInfo* pCapAddrCi =
5269 vk_find_struct<VkBufferOpaqueCaptureAddressCreateInfo>(pCreateInfo);
5270 if (pCapAddrCi) {
5271 localCapAddrCi = vk_make_orphan_copy(*pCapAddrCi);
5272 vk_append_struct(&structChainIter, &localCapAddrCi);
5273 }
5274
5275 VkBufferDeviceAddressCreateInfoEXT localDevAddrCi;
5276 const VkBufferDeviceAddressCreateInfoEXT* pDevAddrCi =
5277 vk_find_struct<VkBufferDeviceAddressCreateInfoEXT>(pCreateInfo);
5278 if (pDevAddrCi) {
5279 localDevAddrCi = vk_make_orphan_copy(*pDevAddrCi);
5280 vk_append_struct(&structChainIter, &localDevAddrCi);
5281 }
5282
Yilong Lif8eda752020-07-01 19:19:45 -07005283#ifdef VK_USE_PLATFORM_FUCHSIA
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005284 Optional<zx::vmo> vmo;
5285 bool isSysmemBackedMemory = false;
Yilong Lif8eda752020-07-01 19:19:45 -07005286
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005287 if (extBufCiPtr &&
5288 (extBufCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA)) {
5289 isSysmemBackedMemory = true;
5290 }
5291
5292 const auto* extBufferCollectionPtr =
5293 vk_find_struct<VkBufferCollectionBufferCreateInfoFUCHSIA>(pCreateInfo);
5294
5295 if (extBufferCollectionPtr) {
5296 const auto& collection =
5297 *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
5298 extBufferCollectionPtr->collection);
5299 uint32_t index = extBufferCollectionPtr->index;
5300
5301 auto result = collection->WaitForBuffersAllocated();
5302 if (result.ok() && result->status == ZX_OK) {
5303 auto& info = result->buffer_collection_info;
5304 if (index < info.buffer_count) {
5305 vmo = gfxstream::guest::makeOptional(std::move(info.buffers[index].vmo));
5306 }
5307 } else {
Gurchetan Singh42361f72024-05-16 17:37:11 -07005308 mesa_loge("WaitForBuffersAllocated failed: %d %d", result.status(),
5309 GET_STATUS_SAFE(result, status));
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005310 }
5311
5312 if (vmo && vmo->is_valid()) {
5313 fidl::Arena arena;
5314 fuchsia_hardware_goldfish::wire::CreateBuffer2Params createParams(arena);
5315 createParams.set_size(arena, pCreateInfo->size)
5316 .set_memory_property(fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal);
5317
5318 auto result = mControlDevice->CreateBuffer2(std::move(*vmo), createParams);
5319 if (!result.ok() ||
5320 (result->is_error() != ZX_OK && result->error_value() != ZX_ERR_ALREADY_EXISTS)) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07005321 mesa_loge("CreateBuffer2 failed: %d:%d", result.status(),
5322 GET_STATUS_SAFE(result, error_value()));
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005323 }
Yilong Lie1a91332020-07-19 17:37:49 -07005324 isSysmemBackedMemory = true;
5325 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005326 }
Yilong Lif8eda752020-07-01 19:19:45 -07005327#endif // VK_USE_PLATFORM_FUCHSIA
5328
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005329 VkResult res;
5330 VkMemoryRequirements memReqs;
Lingfeng Yang87f11962019-06-27 16:28:53 +00005331
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005332 if (supportsCreateResourcesWithRequirements()) {
5333 res = enc->vkCreateBufferWithRequirementsGOOGLE(device, &localCreateInfo, pAllocator,
5334 pBuffer, &memReqs, true /* do lock */);
5335 } else {
5336 res =
5337 enc->vkCreateBuffer(device, &localCreateInfo, pAllocator, pBuffer, true /* do lock */);
5338 }
Lingfeng Yang3e87e852019-02-19 14:12:49 -08005339
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005340 if (res != VK_SUCCESS) return res;
Lingfeng Yang3e87e852019-02-19 14:12:49 -08005341
Gurchetan Singhc4444b82023-09-19 08:06:20 -07005342#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005343 if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
5344 mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
5345 }
Gurchetan Singhc4444b82023-09-19 08:06:20 -07005346 if (extBufCiPtr &&
5347 ((extBufCiPtr->handleTypes &
5348 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) ||
5349 (extBufCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT))) {
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005350 updateMemoryTypeBits(&memReqs.memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);
5351 }
Jason Macnak66e397b2023-03-15 12:11:22 -07005352#endif
5353
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005354 AutoLock<RecursiveLock> lock(mLock);
Lingfeng Yang3e87e852019-02-19 14:12:49 -08005355
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005356 auto it = info_VkBuffer.find(*pBuffer);
5357 if (it == info_VkBuffer.end()) return VK_ERROR_INITIALIZATION_FAILED;
Lingfeng Yang3e87e852019-02-19 14:12:49 -08005358
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005359 auto& info = it->second;
Lingfeng Yang3e87e852019-02-19 14:12:49 -08005360
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005361 info.createInfo = localCreateInfo;
5362 info.createInfo.pNext = nullptr;
Lingfeng Yang3e87e852019-02-19 14:12:49 -08005363
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005364 if (supportsCreateResourcesWithRequirements()) {
Lingfeng Yang87f11962019-06-27 16:28:53 +00005365 info.baseRequirementsKnown = true;
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005366 info.baseRequirements = memReqs;
Lingfeng Yang4af5f322019-02-14 08:10:28 -08005367 }
5368
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005369 if (extBufCiPtr) {
5370 info.external = true;
5371 info.externalCreateInfo = *extBufCiPtr;
Lingfeng Yang4af5f322019-02-14 08:10:28 -08005372 }
5373
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005374#ifdef VK_USE_PLATFORM_FUCHSIA
5375 if (isSysmemBackedMemory) {
5376 info.isSysmemBackedMemory = true;
5377 }
5378#endif
5379
5380 return res;
5381}
5382
5383void ResourceTracker::on_vkDestroyBuffer(void* context, VkDevice device, VkBuffer buffer,
5384 const VkAllocationCallbacks* pAllocator) {
5385 VkEncoder* enc = (VkEncoder*)context;
5386 enc->vkDestroyBuffer(device, buffer, pAllocator, true /* do lock */);
5387}
5388
// Returns this buffer's memory requirements, serving from the guest-side
// cache when available; otherwise queries the host and caches the result.
void ResourceTracker::on_vkGetBufferMemoryRequirements(void* context, VkDevice device,
                                                       VkBuffer buffer,
                                                       VkMemoryRequirements* pMemoryRequirements) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkBuffer.find(buffer);
    if (it == info_VkBuffer.end()) return;

    auto& info = it->second;

    // Fast path: requirements were already computed (e.g. at buffer creation
    // via vkCreateBufferWithRequirementsGOOGLE, or by a previous call).
    if (info.baseRequirementsKnown) {
        *pMemoryRequirements = info.baseRequirements;
        return;
    }

    // Release the lock across the host round trip.
    // NOTE(review): `info` continues to reference the map entry while the lock
    // is dropped; this assumes no other thread erases this buffer's entry
    // concurrently — TODO confirm.
    lock.unlock();

    VkEncoder* enc = (VkEncoder*)context;
    enc->vkGetBufferMemoryRequirements(device, buffer, pMemoryRequirements, true /* do lock */);

    lock.lock();

    info.baseRequirementsKnown = true;
    info.baseRequirements = *pMemoryRequirements;
}
5414
5415void ResourceTracker::on_vkGetBufferMemoryRequirements2(
5416 void* context, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo,
5417 VkMemoryRequirements2* pMemoryRequirements) {
5418 VkEncoder* enc = (VkEncoder*)context;
5419 enc->vkGetBufferMemoryRequirements2(device, pInfo, pMemoryRequirements, true /* do lock */);
5420 transformBufferMemoryRequirements2ForGuest(pInfo->buffer, pMemoryRequirements);
5421}
5422
5423void ResourceTracker::on_vkGetBufferMemoryRequirements2KHR(
5424 void* context, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo,
5425 VkMemoryRequirements2* pMemoryRequirements) {
5426 VkEncoder* enc = (VkEncoder*)context;
5427 enc->vkGetBufferMemoryRequirements2KHR(device, pInfo, pMemoryRequirements, true /* do lock */);
5428 transformBufferMemoryRequirements2ForGuest(pInfo->buffer, pMemoryRequirements);
5429}
5430
5431VkResult ResourceTracker::on_vkBindBufferMemory(void* context, VkResult, VkDevice device,
5432 VkBuffer buffer, VkDeviceMemory memory,
5433 VkDeviceSize memoryOffset) {
5434 VkEncoder* enc = (VkEncoder*)context;
5435 return enc->vkBindBufferMemory(device, buffer, memory, memoryOffset, true /* do lock */);
5436}
5437
5438VkResult ResourceTracker::on_vkBindBufferMemory2(void* context, VkResult, VkDevice device,
5439 uint32_t bindInfoCount,
5440 const VkBindBufferMemoryInfo* pBindInfos) {
5441 VkEncoder* enc = (VkEncoder*)context;
5442 return enc->vkBindBufferMemory2(device, bindInfoCount, pBindInfos, true /* do lock */);
5443}
5444
5445VkResult ResourceTracker::on_vkBindBufferMemory2KHR(void* context, VkResult, VkDevice device,
5446 uint32_t bindInfoCount,
5447 const VkBindBufferMemoryInfo* pBindInfos) {
5448 VkEncoder* enc = (VkEncoder*)context;
5449 return enc->vkBindBufferMemory2KHR(device, bindInfoCount, pBindInfos, true /* do lock */);
5450}
5451
// Creates a semaphore on the host. If the application asked to export the
// semaphore's payload (via VkExportSemaphoreCreateInfoKHR), the export chain
// is stripped before encoding (only a chained VkSemaphoreTypeCreateInfo is
// preserved, so timeline semaphores still work), and an OS-level payload
// object is created and recorded in the semaphore's tracking entry:
//  - Fuchsia: a zircon event.
//  - Android/Linux: a sync fd, backed by virtio-gpu native sync when
//    available, otherwise the goldfish sync device.
VkResult ResourceTracker::on_vkCreateSemaphore(void* context, VkResult input_result,
                                               VkDevice device,
                                               const VkSemaphoreCreateInfo* pCreateInfo,
                                               const VkAllocationCallbacks* pAllocator,
                                               VkSemaphore* pSemaphore) {
    (void)input_result;
    VkEncoder* enc = (VkEncoder*)context;

    // Local copy so the pNext chain can be rewritten without touching the
    // caller's struct.
    VkSemaphoreCreateInfo finalCreateInfo = *pCreateInfo;

    const VkExportSemaphoreCreateInfoKHR* exportSemaphoreInfoPtr =
        vk_find_struct<VkExportSemaphoreCreateInfoKHR>(pCreateInfo);

#ifdef VK_USE_PLATFORM_FUCHSIA
    bool exportEvent =
        exportSemaphoreInfoPtr && (exportSemaphoreInfoPtr->handleTypes &
                                   VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA);

    if (exportEvent) {
        finalCreateInfo.pNext = nullptr;
        // If we have timeline semaphores externally, leave it there.
        const VkSemaphoreTypeCreateInfo* typeCi =
            vk_find_struct<VkSemaphoreTypeCreateInfo>(pCreateInfo);
        if (typeCi) finalCreateInfo.pNext = typeCi;
    }
#endif

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    bool exportSyncFd = exportSemaphoreInfoPtr && (exportSemaphoreInfoPtr->handleTypes &
                                                   VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT);

    if (exportSyncFd) {
        finalCreateInfo.pNext = nullptr;
        // If we have timeline semaphores externally, leave it there.
        const VkSemaphoreTypeCreateInfo* typeCi =
            vk_find_struct<VkSemaphoreTypeCreateInfo>(pCreateInfo);
        if (typeCi) finalCreateInfo.pNext = typeCi;
    }
#endif
    input_result = enc->vkCreateSemaphore(device, &finalCreateInfo, pAllocator, pSemaphore,
                                          true /* do lock */);

    // NOTE(review): zx_handle_t / ZX_HANDLE_INVALID are referenced outside the
    // Fuchsia guard; presumably goldfish_vk_private_defs.h supplies stubs on
    // other platforms -- confirm.
    zx_handle_t event_handle = ZX_HANDLE_INVALID;

#ifdef VK_USE_PLATFORM_FUCHSIA
    if (exportEvent) {
        zx_event_create(0, &event_handle);
    }
#endif

    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkSemaphore.find(*pSemaphore);
    if (it == info_VkSemaphore.end()) return VK_ERROR_INITIALIZATION_FAILED;

    auto& info = it->second;

    info.device = device;
    info.eventHandle = event_handle;
#ifdef VK_USE_PLATFORM_FUCHSIA
    info.eventKoid = getEventKoid(info.eventHandle);
#endif

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    if (exportSyncFd) {
        if (mFeatureInfo->hasVirtioGpuNativeSync) {
            // virtio-gpu native sync: create a host fence keyed by the
            // semaphore's host handle and keep the resulting fd as payload.
            VkResult result;
            int64_t osHandle;
            uint64_t hostFenceHandle = get_host_u64_VkSemaphore(*pSemaphore);

            result = createFence(device, hostFenceHandle, osHandle);
            if (result != VK_SUCCESS) return result;

            info.syncFd.emplace(osHandle);
        } else {
#if GFXSTREAM_ENABLE_GUEST_GOLDFISH
            ensureSyncDeviceFd();

            // NOTE(review): this inner exportSyncFd test is redundant -- we
            // are already inside `if (exportSyncFd)` above.
            if (exportSyncFd) {
                int syncFd = -1;
                goldfish_sync_queue_work(
                    mSyncDeviceFd, get_host_u64_VkSemaphore(*pSemaphore) /* the handle */,
                    GOLDFISH_SYNC_VULKAN_SEMAPHORE_SYNC /* thread handle (doubling as type field) */
                    ,
                    &syncFd);
                info.syncFd.emplace(syncFd);
            }
#endif
        }
    }
#endif

    // Note: the encoder's create result (input_result) is not propagated;
    // failure is only reported when the semaphore never entered the tracking
    // map above.
    return VK_SUCCESS;
}
5546
5547void ResourceTracker::on_vkDestroySemaphore(void* context, VkDevice device, VkSemaphore semaphore,
5548 const VkAllocationCallbacks* pAllocator) {
5549 VkEncoder* enc = (VkEncoder*)context;
5550 enc->vkDestroySemaphore(device, semaphore, pAllocator, true /* do lock */);
5551}
5552
5553// https://www.khronos.org/registry/vulkan/specs/1.0-extensions/html/vkspec.html#vkGetSemaphoreFdKHR
5554// Each call to vkGetSemaphoreFdKHR must create a new file descriptor and transfer ownership
5555// of it to the application. To avoid leaking resources, the application must release ownership
5556// of the file descriptor when it is no longer needed.
5557VkResult ResourceTracker::on_vkGetSemaphoreFdKHR(void* context, VkResult, VkDevice device,
5558 const VkSemaphoreGetFdInfoKHR* pGetFdInfo,
5559 int* pFd) {
Gurchetan Singh6c5a79d2022-02-10 11:47:59 -08005560#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005561 VkEncoder* enc = (VkEncoder*)context;
5562 bool getSyncFd = pGetFdInfo->handleType & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
Lingfeng Yang5c701122019-03-05 08:34:46 -08005563
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005564 if (getSyncFd) {
Gurchetan Singh6c906de2021-10-21 17:09:00 -07005565 AutoLock<RecursiveLock> lock(mLock);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005566 auto it = info_VkSemaphore.find(pGetFdInfo->semaphore);
5567 if (it == info_VkSemaphore.end()) return VK_ERROR_OUT_OF_HOST_MEMORY;
5568 auto& semInfo = it->second;
5569 // syncFd is supposed to have value.
5570 auto* syncHelper =
5571 ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
5572 *pFd = syncHelper->dup(semInfo.syncFd.value_or(-1));
David Reveman5b7c5842019-02-20 01:06:48 -05005573 return VK_SUCCESS;
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005574 } else {
5575 // opaque fd
5576 int hostFd = 0;
5577 VkResult result = enc->vkGetSemaphoreFdKHR(device, pGetFdInfo, &hostFd, true /* do lock */);
5578 if (result != VK_SUCCESS) {
Lingfeng Yangf4344412019-03-18 19:02:09 -07005579 return result;
Yahan Zhoua499e442019-02-26 16:35:01 -08005580 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005581 *pFd = memfd_create("vk_opaque_fd", 0);
5582 write(*pFd, &hostFd, sizeof(hostFd));
David Reveman5b7c5842019-02-20 01:06:48 -05005583 return VK_SUCCESS;
David Reveman24710222019-02-25 02:21:42 -05005584 }
Jason Macnak650c0c02023-07-20 16:06:53 -07005585#else
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005586 (void)context;
5587 (void)device;
5588 (void)pGetFdInfo;
5589 (void)pFd;
5590 return VK_ERROR_INCOMPATIBLE_DRIVER;
Jason Macnak650c0c02023-07-20 16:06:53 -07005591#endif
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005592}
Lingfeng Yang236abc92018-12-21 20:19:33 -08005593
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005594VkResult ResourceTracker::on_vkImportSemaphoreFdKHR(
5595 void* context, VkResult input_result, VkDevice device,
5596 const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo) {
5597#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
5598 VkEncoder* enc = (VkEncoder*)context;
5599 if (input_result != VK_SUCCESS) {
Lingfeng Yang236abc92018-12-21 20:19:33 -08005600 return input_result;
5601 }
5602
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005603 auto* syncHelper = ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
5604
5605 if (pImportSemaphoreFdInfo->handleType & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
5606 VkImportSemaphoreFdInfoKHR tmpInfo = *pImportSemaphoreFdInfo;
Lingfeng Yang05d5ea32019-03-23 00:12:39 -07005607
Gurchetan Singh6c906de2021-10-21 17:09:00 -07005608 AutoLock<RecursiveLock> lock(mLock);
Lingfeng Yang05d5ea32019-03-23 00:12:39 -07005609
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005610 auto semaphoreIt = info_VkSemaphore.find(pImportSemaphoreFdInfo->semaphore);
5611 auto& info = semaphoreIt->second;
5612
5613 if (info.syncFd.value_or(-1) >= 0) {
5614 syncHelper->close(info.syncFd.value());
Lingfeng Yang05d5ea32019-03-23 00:12:39 -07005615 }
5616
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005617 info.syncFd.emplace(pImportSemaphoreFdInfo->fd);
Lingfeng Yang05d5ea32019-03-23 00:12:39 -07005618
5619 return VK_SUCCESS;
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005620 } else {
5621 int fd = pImportSemaphoreFdInfo->fd;
5622 int err = lseek(fd, 0, SEEK_SET);
5623 if (err == -1) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07005624 mesa_loge("lseek fail on import semaphore");
Lingfeng Yang05d5ea32019-03-23 00:12:39 -07005625 }
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005626 int hostFd = 0;
5627 read(fd, &hostFd, sizeof(hostFd));
5628 VkImportSemaphoreFdInfoKHR tmpInfo = *pImportSemaphoreFdInfo;
5629 tmpInfo.fd = hostFd;
5630 VkResult result = enc->vkImportSemaphoreFdKHR(device, &tmpInfo, true /* do lock */);
5631 syncHelper->close(fd);
5632 return result;
Lingfeng Yang05d5ea32019-03-23 00:12:39 -07005633 }
Gurchetan Singh6c5a79d2022-02-10 11:47:59 -08005634#else
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07005635 (void)context;
5636 (void)input_result;
5637 (void)device;
5638 (void)pImportSemaphoreFdInfo;
5639 return VK_ERROR_INCOMPATIBLE_DRIVER;
5640#endif
5641}
Yilong Lid1c69192021-02-05 02:06:07 -08005642
Aaron Ruby16b349b2024-04-26 13:52:40 -04005643VkResult ResourceTracker::on_vkGetMemoryFdPropertiesKHR(
5644 void* context, VkResult, VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, int fd,
5645 VkMemoryFdPropertiesKHR* pMemoryFdProperties) {
5646#if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
5647 if (!(handleType & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07005648 mesa_loge("%s: VK_KHR_external_memory_fd behavior not defined for handleType: 0x%x\n",
5649 __func__, handleType);
Aaron Ruby16b349b2024-04-26 13:52:40 -04005650 return VK_ERROR_INVALID_EXTERNAL_HANDLE;
5651 }
5652 // Sanity-check device
5653 AutoLock<RecursiveLock> lock(mLock);
5654 auto deviceIt = info_VkDevice.find(device);
5655 if (deviceIt == info_VkDevice.end()) {
5656 return VK_ERROR_OUT_OF_HOST_MEMORY;
5657 }
5658 // TODO: Verify FD valid ?
5659 (void)fd;
5660
5661 if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
5662 mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
5663 }
5664
5665 updateMemoryTypeBits(&pMemoryFdProperties->memoryTypeBits,
5666 mCaps.vulkanCapset.colorBufferMemoryIndex);
5667
5668 return VK_SUCCESS;
5669#else
5670 (void)context;
5671 (void)device;
5672 (void)handleType;
5673 (void)fd;
5674 (void)pMemoryFdProperties;
5675 return VK_ERROR_INCOMPATIBLE_DRIVER;
5676#endif
5677}
5678
Gurchetan Singhc4444b82023-09-19 08:06:20 -07005679VkResult ResourceTracker::on_vkGetMemoryFdKHR(void* context, VkResult, VkDevice device,
5680 const VkMemoryGetFdInfoKHR* pGetFdInfo, int* pFd) {
5681#if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
5682 if (!pGetFdInfo) return VK_ERROR_OUT_OF_HOST_MEMORY;
5683 if (!pGetFdInfo->memory) return VK_ERROR_OUT_OF_HOST_MEMORY;
5684
5685 if (!(pGetFdInfo->handleType & (VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
5686 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT))) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07005687 mesa_loge("%s: Export operation not defined for handleType: 0x%x\n", __func__,
5688 pGetFdInfo->handleType);
Gurchetan Singhc4444b82023-09-19 08:06:20 -07005689 return VK_ERROR_OUT_OF_HOST_MEMORY;
5690 }
5691 // Sanity-check device
5692 AutoLock<RecursiveLock> lock(mLock);
5693 auto deviceIt = info_VkDevice.find(device);
5694 if (deviceIt == info_VkDevice.end()) {
5695 return VK_ERROR_OUT_OF_HOST_MEMORY;
5696 }
5697
5698 auto deviceMemIt = info_VkDeviceMemory.find(pGetFdInfo->memory);
5699 if (deviceMemIt == info_VkDeviceMemory.end()) {
5700 return VK_ERROR_OUT_OF_HOST_MEMORY;
5701 }
5702 auto& info = deviceMemIt->second;
5703
5704 if (!info.blobPtr) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07005705 mesa_loge("%s: VkDeviceMemory does not have a resource available for export.\n", __func__);
Gurchetan Singhc4444b82023-09-19 08:06:20 -07005706 return VK_ERROR_OUT_OF_HOST_MEMORY;
5707 }
5708
5709 VirtGpuExternalHandle handle{};
5710 int ret = info.blobPtr->exportBlob(handle);
5711 if (ret != 0 || handle.osHandle < 0) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07005712 mesa_loge("%s: Failed to export host resource to FD.\n", __func__);
Gurchetan Singhc4444b82023-09-19 08:06:20 -07005713 return VK_ERROR_OUT_OF_HOST_MEMORY;
5714 }
5715 *pFd = handle.osHandle;
5716 return VK_SUCCESS;
5717#else
5718 (void)context;
5719 (void)device;
5720 (void)pGetFdInfo;
5721 (void)pFd;
5722 return VK_ERROR_INCOMPATIBLE_DRIVER;
5723#endif
5724}
5725
// Flushes the staged (guest-recorded) commands of every command buffer in
// |workingSet| to the host. Recurses into each command buffer's subObjects
// (secondary command buffers) first, so the deepest level is flushed before
// its parents -- hence "bottom-up".
void ResourceTracker::flushCommandBufferPendingCommandsBottomUp(
    void* context, VkQueue queue, const std::vector<VkCommandBuffer>& workingSet) {
    if (workingSet.empty()) return;

    // Gather the next (deeper) level: all secondaries of this working set.
    std::vector<VkCommandBuffer> nextLevel;
    for (auto commandBuffer : workingSet) {
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
        forAllObjects(cb->subObjects, [&nextLevel](void* secondary) {
            nextLevel.push_back((VkCommandBuffer)secondary);
        });
    }

    flushCommandBufferPendingCommandsBottomUp(context, queue, nextLevel);

    // After this point, everyone at the previous level has been flushed
    for (auto cmdbuf : workingSet) {
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(cmdbuf);

        // There's no pending commands here, skip. (case 1)
        if (!cb->privateStream) continue;

        unsigned char* writtenPtr = 0;
        size_t written = 0;
        CommandBufferStagingStream* cmdBufStream =
            static_cast<CommandBufferStagingStream*>(cb->privateStream);
        cmdBufStream->getWritten(&writtenPtr, &written);

        // There's no pending commands here, skip. (case 2, stream created but no new recordings)
        if (!written) continue;

        // There are pending commands to flush.
        VkEncoder* enc = (VkEncoder*)context;
        VkDeviceMemory deviceMemory = cmdBufStream->getDeviceMemory();
        VkDeviceSize dataOffset = 0;
        if (mFeatureInfo->hasVulkanAuxCommandMemory) {
            // for suballocations, deviceMemory is an alias VkDeviceMemory
            // get underling VkDeviceMemory for given alias
            deviceMemoryTransform_tohost(&deviceMemory, 1 /*memoryCount*/, &dataOffset,
                                         1 /*offsetCount*/, nullptr /*size*/, 0 /*sizeCount*/,
                                         nullptr /*typeIndex*/, 0 /*typeIndexCount*/,
                                         nullptr /*typeBits*/, 0 /*typeBitCounts*/);

            // mark stream as flushing before flushing commands
            cmdBufStream->markFlushing();
            enc->vkQueueFlushCommandsFromAuxMemoryGOOGLE(queue, cmdbuf, deviceMemory, dataOffset,
                                                         written, true /*do lock*/);
        } else {
            enc->vkQueueFlushCommandsGOOGLE(queue, cmdbuf, written, (const void*)writtenPtr,
                                            true /* do lock */);
        }
        // Reset this stream.
        // flushing happens on vkQueueSubmit
        // vulkan api states that on queue submit,
        // applications MUST not attempt to modify the command buffer in any way
        // -as the device may be processing the commands recorded to it.
        // It is safe to call reset() here for this reason.
        // Command Buffer associated with this stream will only leave pending state
        // after queue submit is complete and host has read the data
        cmdBufStream->reset();
    }
}
5787
// Ensures host-side ordering when a queue is used through a different
// VkEncoder (guest thread/connection) than the one that used it last: emits a
// pair of vkQueueHostSyncGOOGLE calls tagged with consecutive sequence
// numbers on the old and new encoders, flushing the old encoder in between.
// Encoder lifetime is tracked via incRef/decRef. Always returns 0.
uint32_t ResourceTracker::syncEncodersForQueue(VkQueue queue, VkEncoder* currentEncoder) {
    if (!supportsAsyncQueueSubmit()) {
        return 0;
    }

    struct goldfish_VkQueue* q = as_goldfish_VkQueue(queue);
    if (!q) return 0;

    auto lastEncoder = q->lastUsedEncoder;

    // Same encoder as last time: nothing to synchronize.
    if (lastEncoder == currentEncoder) return 0;

    currentEncoder->incRef();

    q->lastUsedEncoder = currentEncoder;

    // First encoder ever seen on this queue: just record it.
    if (!lastEncoder) return 0;

    auto oldSeq = q->sequenceNumber;
    q->sequenceNumber += 2;
    lastEncoder->vkQueueHostSyncGOOGLE(queue, false, oldSeq + 1, true /* do lock */);
    lastEncoder->flush();
    currentEncoder->vkQueueHostSyncGOOGLE(queue, true, oldSeq + 2, true /* do lock */);

    // NOTE(review): when decRef() is truthy, lastUsedEncoder is cleared even
    // though it was just set to currentEncoder above -- confirm decRef's
    // return semantics and whether this is intentional.
    if (lastEncoder->decRef()) {
        q->lastUsedEncoder = nullptr;
    }

    return 0;
}
5818
5819template <class VkSubmitInfoType>
5820void ResourceTracker::flushStagingStreams(void* context, VkQueue queue, uint32_t submitCount,
5821 const VkSubmitInfoType* pSubmits) {
5822 std::vector<VkCommandBuffer> toFlush;
5823 for (uint32_t i = 0; i < submitCount; ++i) {
5824 for (uint32_t j = 0; j < getCommandBufferCount(pSubmits[i]); ++j) {
5825 toFlush.push_back(getCommandBuffer(pSubmits[i], j));
5826 }
5827 }
5828
5829 std::unordered_set<VkDescriptorSet> pendingSets;
5830 collectAllPendingDescriptorSetsBottomUp(toFlush, pendingSets);
5831 commitDescriptorSetUpdates(context, queue, pendingSets);
5832
5833 flushCommandBufferPendingCommandsBottomUp(context, queue, toFlush);
5834
5835 for (auto cb : toFlush) {
5836 resetCommandBufferPendingTopology(cb);
5837 }
5838}
5839
5840VkResult ResourceTracker::on_vkQueueSubmit(void* context, VkResult input_result, VkQueue queue,
5841 uint32_t submitCount, const VkSubmitInfo* pSubmits,
5842 VkFence fence) {
5843 AEMU_SCOPED_TRACE("on_vkQueueSubmit");
5844 return on_vkQueueSubmitTemplate<VkSubmitInfo>(context, input_result, queue, submitCount,
5845 pSubmits, fence);
5846}
5847
5848VkResult ResourceTracker::on_vkQueueSubmit2(void* context, VkResult input_result, VkQueue queue,
5849 uint32_t submitCount, const VkSubmitInfo2* pSubmits,
5850 VkFence fence) {
5851 AEMU_SCOPED_TRACE("on_vkQueueSubmit2");
5852 return on_vkQueueSubmitTemplate<VkSubmitInfo2>(context, input_result, queue, submitCount,
5853 pSubmits, fence);
5854}
5855
5856VkResult ResourceTracker::vkQueueSubmitEnc(VkEncoder* enc, VkQueue queue, uint32_t submitCount,
5857 const VkSubmitInfo* pSubmits, VkFence fence) {
5858 if (supportsAsyncQueueSubmit()) {
5859 enc->vkQueueSubmitAsyncGOOGLE(queue, submitCount, pSubmits, fence, true /* do lock */);
5860 return VK_SUCCESS;
5861 } else {
5862 return enc->vkQueueSubmit(queue, submitCount, pSubmits, fence, true /* do lock */);
5863 }
5864}
5865
5866VkResult ResourceTracker::vkQueueSubmitEnc(VkEncoder* enc, VkQueue queue, uint32_t submitCount,
5867 const VkSubmitInfo2* pSubmits, VkFence fence) {
5868 if (supportsAsyncQueueSubmit()) {
5869 enc->vkQueueSubmitAsync2GOOGLE(queue, submitCount, pSubmits, fence, true /* do lock */);
5870 return VK_SUCCESS;
5871 } else {
5872 return enc->vkQueueSubmit2(queue, submitCount, pSubmits, fence, true /* do lock */);
5873 }
5874}
5875
// Shared implementation behind vkQueueSubmit and vkQueueSubmit2.
// Phases:
//  1. Flush guest-staged command/descriptor state for the batch.
//  2. Under mLock, collect wait semaphores whose external payloads (zircon
//     events / sync fds) must be waited on guest-side before submitting, and
//     signal semaphores whose external payloads must be signaled after the
//     work completes.
//  3. Submit. If guest-side pre-waits exist, run them on the work pool first,
//     then signal those wait semaphores via an extra submit so the real
//     submission's waits can complete.
//  4. If anything must be signaled afterwards (external fence fd, post-wait
//     events/fds), schedule an async task that waits for queue idle and then
//     signals them; the task handle is parked per-queue for vkQueueWaitIdle.
template <typename VkSubmitInfoType>
VkResult ResourceTracker::on_vkQueueSubmitTemplate(void* context, VkResult input_result,
                                                   VkQueue queue, uint32_t submitCount,
                                                   const VkSubmitInfoType* pSubmits,
                                                   VkFence fence) {
    flushStagingStreams(context, queue, submitCount, pSubmits);

    std::vector<VkSemaphore> pre_signal_semaphores;
    std::vector<zx_handle_t> pre_signal_events;
    std::vector<int> pre_signal_sync_fds;
    std::vector<std::pair<zx_handle_t, zx_koid_t>> post_wait_events;
    std::vector<int> post_wait_sync_fds;

    VkEncoder* enc = (VkEncoder*)context;

    AutoLock<RecursiveLock> lock(mLock);

    // Phase 2: scan wait/signal semaphores for external payloads.
    for (uint32_t i = 0; i < submitCount; ++i) {
        for (uint32_t j = 0; j < getWaitSemaphoreCount(pSubmits[i]); ++j) {
            VkSemaphore semaphore = getWaitSemaphore(pSubmits[i], j);
            auto it = info_VkSemaphore.find(semaphore);
            if (it != info_VkSemaphore.end()) {
                auto& semInfo = it->second;
#ifdef VK_USE_PLATFORM_FUCHSIA
                if (semInfo.eventHandle) {
                    pre_signal_events.push_back(semInfo.eventHandle);
                    pre_signal_semaphores.push_back(semaphore);
                }
#endif
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
                if (semInfo.syncFd.has_value()) {
                    pre_signal_sync_fds.push_back(semInfo.syncFd.value());
                    pre_signal_semaphores.push_back(semaphore);
                }
#endif
            }
        }
        for (uint32_t j = 0; j < getSignalSemaphoreCount(pSubmits[i]); ++j) {
            auto it = info_VkSemaphore.find(getSignalSemaphore(pSubmits[i], j));
            if (it != info_VkSemaphore.end()) {
                auto& semInfo = it->second;
#ifdef VK_USE_PLATFORM_FUCHSIA
                if (semInfo.eventHandle) {
                    post_wait_events.push_back({semInfo.eventHandle, semInfo.eventKoid});
#ifndef FUCHSIA_NO_TRACE
                    if (semInfo.eventKoid != ZX_KOID_INVALID) {
                        // TODO(fxbug.dev/42144867): Remove the "semaphore"
                        // FLOW_END events once it is removed from clients
                        // (for example, gfx Engine).
                        TRACE_FLOW_END("gfx", "semaphore", semInfo.eventKoid);
                        TRACE_FLOW_BEGIN("gfx", "goldfish_post_wait_event", semInfo.eventKoid);
                    }
#endif
                }
#endif
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
                if (semInfo.syncFd.value_or(-1) >= 0) {
                    post_wait_sync_fds.push_back(semInfo.syncFd.value());
                }
#endif
            }
        }
    }
    lock.unlock();

    // Phase 3: submit, with guest-side pre-waits when required.
    if (pre_signal_semaphores.empty()) {
        input_result = vkQueueSubmitEnc(enc, queue, submitCount, pSubmits, fence);
        if (input_result != VK_SUCCESS) return input_result;
    } else {
        // Schedule waits on the OS external objects and
        // signal the wait semaphores
        // in a separate thread.
        std::vector<WorkPool::Task> preSignalTasks;
        std::vector<WorkPool::Task> preSignalQueueSubmitTasks;
        ;
#ifdef VK_USE_PLATFORM_FUCHSIA
        for (auto event : pre_signal_events) {
            preSignalTasks.push_back([event] {
                zx_object_wait_one(event, ZX_EVENT_SIGNALED, ZX_TIME_INFINITE, nullptr);
            });
        }
#endif
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
        for (auto fd : pre_signal_sync_fds) {
            // https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkImportSemaphoreFdInfoKHR.html
            // fd == -1 is treated as already signaled
            if (fd != -1) {
                preSignalTasks.push_back([fd] {
                    auto* syncHelper =
                        ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
                    syncHelper->wait(fd, 3000);
                });
            }
        }
#endif
        if (!preSignalTasks.empty()) {
            auto waitGroupHandle = mWorkPool.schedule(preSignalTasks);
            mWorkPool.waitAll(waitGroupHandle);
        }

        // Use the old version of VkSubmitInfo
        VkSubmitInfo submit_info = {
            .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
            .waitSemaphoreCount = 0,
            .pWaitSemaphores = nullptr,
            .pWaitDstStageMask = nullptr,
            .signalSemaphoreCount = static_cast<uint32_t>(pre_signal_semaphores.size()),
            .pSignalSemaphores = pre_signal_semaphores.data()};
        vkQueueSubmitEnc(enc, queue, 1, &submit_info, VK_NULL_HANDLE);
        input_result = vkQueueSubmitEnc(enc, queue, submitCount, pSubmits, fence);
        if (input_result != VK_SUCCESS) return input_result;
    }
    lock.lock();
    int externalFenceFdToSignal = -1;

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    // If the fence has an exported sync fd, it must be signaled once the
    // submitted work completes.
    if (fence != VK_NULL_HANDLE) {
        auto it = info_VkFence.find(fence);
        if (it != info_VkFence.end()) {
            const auto& info = it->second;
            if (info.syncFd >= 0) {
                externalFenceFdToSignal = info.syncFd;
            }
        }
    }
#endif
    // Phase 4: asynchronously wait for idle, then signal external objects.
    if (externalFenceFdToSignal >= 0 || !post_wait_events.empty() || !post_wait_sync_fds.empty()) {
        std::vector<WorkPool::Task> tasks;

        tasks.push_back([queue, externalFenceFdToSignal, post_wait_events /* copy of zx handles */,
                         post_wait_sync_fds /* copy of sync fds */] {
            auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
            auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
            auto waitIdleRes = vkEncoder->vkQueueWaitIdle(queue, true /* do lock */);
#ifdef VK_USE_PLATFORM_FUCHSIA
            AEMU_SCOPED_TRACE("on_vkQueueSubmit::SignalSemaphores");
            (void)externalFenceFdToSignal;
            for (auto& [event, koid] : post_wait_events) {
#ifndef FUCHSIA_NO_TRACE
                if (koid != ZX_KOID_INVALID) {
                    TRACE_FLOW_END("gfx", "goldfish_post_wait_event", koid);
                    TRACE_FLOW_BEGIN("gfx", "event_signal", koid);
                }
#endif
                zx_object_signal(event, 0, ZX_EVENT_SIGNALED);
            }
#endif
#if GFXSTREAM_ENABLE_GUEST_GOLDFISH
            for (auto& fd : post_wait_sync_fds) {
                goldfish_sync_signal(fd);
            }

            if (externalFenceFdToSignal >= 0) {
                mesa_logi("%s: external fence real signal: %d\n", __func__, externalFenceFdToSignal);
                goldfish_sync_signal(externalFenceFdToSignal);
            }
#endif
        });
        auto queueAsyncWaitHandle = mWorkPool.schedule(tasks);
        auto& queueWorkItems = mQueueSensitiveWorkPoolItems[queue];
        queueWorkItems.push_back(queueAsyncWaitHandle);
    }
    return VK_SUCCESS;
}
6040
6041VkResult ResourceTracker::on_vkQueueWaitIdle(void* context, VkResult, VkQueue queue) {
6042 VkEncoder* enc = (VkEncoder*)context;
6043
6044 AutoLock<RecursiveLock> lock(mLock);
6045 std::vector<WorkPool::WaitGroupHandle> toWait = mQueueSensitiveWorkPoolItems[queue];
6046 mQueueSensitiveWorkPoolItems[queue].clear();
6047 lock.unlock();
6048
6049 if (toWait.empty()) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07006050 mesa_logi("%s: No queue-specific work pool items\n", __func__);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07006051 return enc->vkQueueWaitIdle(queue, true /* do lock */);
6052 }
6053
6054 for (auto handle : toWait) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07006055 mesa_logi("%s: waiting on work group item: %llu\n", __func__, (unsigned long long)handle);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07006056 mWorkPool.waitAll(handle);
6057 }
6058
6059 // now done waiting, get the host's opinion
6060 return enc->vkQueueWaitIdle(queue, true /* do lock */);
6061}
6062
6063#ifdef VK_USE_PLATFORM_ANDROID_KHR
6064void ResourceTracker::unwrap_VkNativeBufferANDROID(const VkNativeBufferANDROID* inputNativeInfo,
6065 VkNativeBufferANDROID* outputNativeInfo) {
6066 if (!inputNativeInfo || !inputNativeInfo->handle) {
6067 return;
6068 }
6069
6070 if (!outputNativeInfo || !outputNativeInfo) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07006071 mesa_loge("FATAL: Local native buffer info not properly allocated!");
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07006072 abort();
6073 }
6074
6075 auto* gralloc = ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper();
Gurchetan Singhc4444b82023-09-19 08:06:20 -07006076 const native_handle_t* nativeHandle = (const native_handle_t*)inputNativeInfo->handle;
Gurchetan Singhc4444b82023-09-19 08:06:20 -07006077 *(uint32_t*)(outputNativeInfo->handle) = gralloc->getHostHandle(nativeHandle);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07006078}
6079
6080void ResourceTracker::unwrap_VkBindImageMemorySwapchainInfoKHR(
6081 const VkBindImageMemorySwapchainInfoKHR* inputBimsi,
6082 VkBindImageMemorySwapchainInfoKHR* outputBimsi) {
6083 if (!inputBimsi || !inputBimsi->swapchain) {
6084 return;
6085 }
6086
6087 if (!outputBimsi || !outputBimsi->swapchain) {
Gurchetan Singhe7fc3572023-12-06 18:19:57 -08006088 return;
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07006089 }
6090
6091 // Android based swapchains are implemented by the Android framework's
6092 // libvulkan. The only exist within the guest and should not be sent to
6093 // the host.
6094 outputBimsi->swapchain = VK_NULL_HANDLE;
6095}
6096#endif
6097
6098void ResourceTracker::unwrap_vkCreateImage_pCreateInfo(const VkImageCreateInfo* pCreateInfo,
6099 VkImageCreateInfo* local_pCreateInfo) {
6100#ifdef VK_USE_PLATFORM_ANDROID_KHR
6101 const VkNativeBufferANDROID* inputNativeInfo =
6102 vk_find_struct<VkNativeBufferANDROID>(pCreateInfo);
6103
6104 VkNativeBufferANDROID* outputNativeInfo = const_cast<VkNativeBufferANDROID*>(
6105 vk_find_struct<VkNativeBufferANDROID>(local_pCreateInfo));
6106
6107 unwrap_VkNativeBufferANDROID(inputNativeInfo, outputNativeInfo);
6108#endif
6109}
6110
6111void ResourceTracker::unwrap_vkAcquireImageANDROID_nativeFenceFd(int fd, int* fd_out) {
6112#ifdef VK_USE_PLATFORM_ANDROID_KHR
6113 (void)fd_out;
6114 if (fd != -1) {
6115 AEMU_SCOPED_TRACE("waitNativeFenceInAcquire");
6116 // Implicit Synchronization
6117 auto* syncHelper =
6118 ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
6119 syncHelper->wait(fd, 3000);
6120 // From libvulkan's swapchain.cpp:
6121 // """
6122 // NOTE: we're relying on AcquireImageANDROID to close fence_clone,
6123 // even if the call fails. We could close it ourselves on failure, but
6124 // that would create a race condition if the driver closes it on a
6125 // failure path: some other thread might create an fd with the same
6126 // number between the time the driver closes it and the time we close
6127 // it. We must assume one of: the driver *always* closes it even on
6128 // failure, or *never* closes it on failure.
6129 // """
6130 // Therefore, assume contract where we need to close fd in this driver
6131 syncHelper->close(fd);
6132 }
6133#endif
6134}
6135
6136void ResourceTracker::unwrap_VkBindImageMemory2_pBindInfos(
6137 uint32_t bindInfoCount, const VkBindImageMemoryInfo* inputBindInfos,
6138 VkBindImageMemoryInfo* outputBindInfos) {
6139#ifdef VK_USE_PLATFORM_ANDROID_KHR
6140 for (uint32_t i = 0; i < bindInfoCount; ++i) {
6141 const VkBindImageMemoryInfo* inputBindInfo = &inputBindInfos[i];
6142 VkBindImageMemoryInfo* outputBindInfo = &outputBindInfos[i];
6143
6144 const VkNativeBufferANDROID* inputNativeInfo =
6145 vk_find_struct<VkNativeBufferANDROID>(inputBindInfo);
6146
6147 VkNativeBufferANDROID* outputNativeInfo = const_cast<VkNativeBufferANDROID*>(
6148 vk_find_struct<VkNativeBufferANDROID>(outputBindInfo));
6149
6150 unwrap_VkNativeBufferANDROID(inputNativeInfo, outputNativeInfo);
6151
6152 const VkBindImageMemorySwapchainInfoKHR* inputBimsi =
6153 vk_find_struct<VkBindImageMemorySwapchainInfoKHR>(inputBindInfo);
6154
6155 VkBindImageMemorySwapchainInfoKHR* outputBimsi =
6156 const_cast<VkBindImageMemorySwapchainInfoKHR*>(
6157 vk_find_struct<VkBindImageMemorySwapchainInfoKHR>(outputBindInfo));
6158
6159 unwrap_VkBindImageMemorySwapchainInfoKHR(inputBimsi, outputBimsi);
6160 }
6161#endif
6162}
6163
6164// Action of vkMapMemoryIntoAddressSpaceGOOGLE:
6165// 1. preprocess (on_vkMapMemoryIntoAddressSpaceGOOGLE_pre):
6166// uses address space device to reserve the right size of
6167// memory.
6168// 2. the reservation results in a physical address. the physical
6169// address is set as |*pAddress|.
6170// 3. after pre, the API call is encoded to the host, where the
6171// value of pAddress is also sent (the physical address).
6172// 4. the host will obtain the actual gpu pointer and send it
6173// back out in |*pAddress|.
6174// 5. postprocess (on_vkMapMemoryIntoAddressSpaceGOOGLE) will run,
6175// using the mmap() method of GoldfishAddressSpaceBlock to obtain
6176// a pointer in guest userspace corresponding to the host pointer.
6177VkResult ResourceTracker::on_vkMapMemoryIntoAddressSpaceGOOGLE_pre(void*, VkResult, VkDevice,
6178 VkDeviceMemory memory,
6179 uint64_t* pAddress) {
6180 AutoLock<RecursiveLock> lock(mLock);
6181
6182 auto it = info_VkDeviceMemory.find(memory);
6183 if (it == info_VkDeviceMemory.end()) {
6184 return VK_ERROR_OUT_OF_HOST_MEMORY;
6185 }
6186
6187#if defined(__ANDROID__)
6188 auto& memInfo = it->second;
6189
6190 GoldfishAddressSpaceBlockPtr block = std::make_shared<GoldfishAddressSpaceBlock>();
6191 block->allocate(mGoldfishAddressSpaceBlockProvider.get(), memInfo.coherentMemorySize);
6192
6193 memInfo.goldfishBlock = block;
6194 *pAddress = block->physAddr();
6195
6196 return VK_SUCCESS;
6197#else
6198 (void)pAddress;
6199 return VK_ERROR_MEMORY_MAP_FAILED;
6200#endif
6201}
6202
6203VkResult ResourceTracker::on_vkMapMemoryIntoAddressSpaceGOOGLE(void*, VkResult input_result,
6204 VkDevice, VkDeviceMemory memory,
6205 uint64_t* pAddress) {
6206 (void)memory;
6207 (void)pAddress;
6208
6209 if (input_result != VK_SUCCESS) {
6210 return input_result;
6211 }
6212
6213 return input_result;
6214}
6215
// Pre-sizes and fills the per-template bookkeeping buffers used later by
// on_vkUpdateDescriptorSetWithTemplate. Pass 1 counts how many entries of
// each descriptor category (image info / buffer info / buffer view / inline
// uniform block) the template describes; pass 2 records, for each descriptor,
// the index of the template entry it belongs to.
//
// Returns VK_ERROR_INITIALIZATION_FAILED if the template handle is unknown,
// VK_SUCCESS otherwise. The raw arrays allocated here are owned by the
// template's info record (freed when the template is destroyed — TODO confirm
// against the destroy path, which is outside this view).
VkResult ResourceTracker::initDescriptorUpdateTemplateBuffers(
    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
    VkDescriptorUpdateTemplate descriptorUpdateTemplate) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkDescriptorUpdateTemplate.find(descriptorUpdateTemplate);
    if (it == info_VkDescriptorUpdateTemplate.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto& info = it->second;
    uint32_t inlineUniformBlockBufferSize = 0;

    // Pass 1: count descriptors per category. For inline uniform blocks,
    // descriptorCount is a byte size, not an element count, so it accumulates
    // into the shared byte buffer size instead of a per-descriptor count.
    for (uint32_t i = 0; i < pCreateInfo->descriptorUpdateEntryCount; ++i) {
        const auto& entry = pCreateInfo->pDescriptorUpdateEntries[i];
        uint32_t descCount = entry.descriptorCount;
        VkDescriptorType descType = entry.descriptorType;
        ++info.templateEntryCount;
        if (isDescriptorTypeInlineUniformBlock(descType)) {
            inlineUniformBlockBufferSize += descCount;
            ++info.inlineUniformBlockCount;
        } else {
            for (uint32_t j = 0; j < descCount; ++j) {
                if (isDescriptorTypeImageInfo(descType)) {
                    ++info.imageInfoCount;
                } else if (isDescriptorTypeBufferInfo(descType)) {
                    ++info.bufferInfoCount;
                } else if (isDescriptorTypeBufferView(descType)) {
                    ++info.bufferViewCount;
                } else {
                    // Unknown type: logged but tolerated (entry contributes no
                    // scratch space); the abort is intentionally disabled.
                    mesa_loge("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
                    // abort();
                }
            }
        }
    }

    // Allocate scratch arrays sized by the pass-1 counts.
    if (info.templateEntryCount)
        info.templateEntries = new VkDescriptorUpdateTemplateEntry[info.templateEntryCount];

    if (info.imageInfoCount) {
        info.imageInfoIndices = new uint32_t[info.imageInfoCount];
        info.imageInfos = new VkDescriptorImageInfo[info.imageInfoCount];
    }

    if (info.bufferInfoCount) {
        info.bufferInfoIndices = new uint32_t[info.bufferInfoCount];
        info.bufferInfos = new VkDescriptorBufferInfo[info.bufferInfoCount];
    }

    if (info.bufferViewCount) {
        info.bufferViewIndices = new uint32_t[info.bufferViewCount];
        info.bufferViews = new VkBufferView[info.bufferViewCount];
    }

    if (info.inlineUniformBlockCount) {
        info.inlineUniformBlockBuffer.resize(inlineUniformBlockBufferSize);
        info.inlineUniformBlockBytesPerBlocks.resize(info.inlineUniformBlockCount);
    }

    uint32_t imageInfoIndex = 0;
    uint32_t bufferInfoIndex = 0;
    uint32_t bufferViewIndex = 0;
    uint32_t inlineUniformBlockIndex = 0;

    // Pass 2: copy the template entries and record, for each descriptor slot,
    // which template entry (i) it came from. These index arrays let the
    // template update path map flattened scratch data back to entries.
    for (uint32_t i = 0; i < pCreateInfo->descriptorUpdateEntryCount; ++i) {
        const auto& entry = pCreateInfo->pDescriptorUpdateEntries[i];
        uint32_t descCount = entry.descriptorCount;
        VkDescriptorType descType = entry.descriptorType;

        info.templateEntries[i] = entry;

        if (isDescriptorTypeInlineUniformBlock(descType)) {
            // Per-block byte size, consumed when walking the template later.
            info.inlineUniformBlockBytesPerBlocks[inlineUniformBlockIndex] = descCount;
            ++inlineUniformBlockIndex;
        } else {
            for (uint32_t j = 0; j < descCount; ++j) {
                if (isDescriptorTypeImageInfo(descType)) {
                    info.imageInfoIndices[imageInfoIndex] = i;
                    ++imageInfoIndex;
                } else if (isDescriptorTypeBufferInfo(descType)) {
                    info.bufferInfoIndices[bufferInfoIndex] = i;
                    ++bufferInfoIndex;
                } else if (isDescriptorTypeBufferView(descType)) {
                    info.bufferViewIndices[bufferViewIndex] = i;
                    ++bufferViewIndex;
                } else {
                    mesa_loge("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
                    // abort();
                }
            }
        }
    }

    return VK_SUCCESS;
}
6312
6313VkResult ResourceTracker::on_vkCreateDescriptorUpdateTemplate(
6314 void* context, VkResult input_result, VkDevice device,
6315 const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
6316 const VkAllocationCallbacks* pAllocator,
6317 VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate) {
6318 (void)context;
6319 (void)device;
6320 (void)pAllocator;
6321
6322 if (input_result != VK_SUCCESS) return input_result;
6323
6324 return initDescriptorUpdateTemplateBuffers(pCreateInfo, *pDescriptorUpdateTemplate);
6325}
6326
6327VkResult ResourceTracker::on_vkCreateDescriptorUpdateTemplateKHR(
6328 void* context, VkResult input_result, VkDevice device,
6329 const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
6330 const VkAllocationCallbacks* pAllocator,
6331 VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate) {
6332 (void)context;
6333 (void)device;
6334 (void)pAllocator;
6335
6336 if (input_result != VK_SUCCESS) return input_result;
6337
6338 return initDescriptorUpdateTemplateBuffers(pCreateInfo, *pDescriptorUpdateTemplate);
6339}
6340
// Applies a descriptor-update-template write. Walks the template entries,
// copying each descriptor payload from the caller's packed |pData| buffer
// into the per-template scratch arrays set up by
// initDescriptorUpdateTemplateBuffers. If the host supports batched
// descriptor set updates, the writes are applied to the guest-side reified
// set and no encode happens here; otherwise the flattened scratch data is
// sent in one vkUpdateDescriptorSetWithTemplateSized2GOOGLE call.
void ResourceTracker::on_vkUpdateDescriptorSetWithTemplate(
    void* context, VkDevice device, VkDescriptorSet descriptorSet,
    VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData) {
    VkEncoder* enc = (VkEncoder*)context;

    uint8_t* userBuffer = (uint8_t*)pData;
    if (!userBuffer) return;

    // TODO: Make this thread safe
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkDescriptorUpdateTemplate.find(descriptorUpdateTemplate);
    if (it == info_VkDescriptorUpdateTemplate.end()) {
        return;
    }

    auto& info = it->second;

    // Snapshot raw pointers/counts so the lock can be dropped during the copy
    // loop below. NOTE(review): the scratch arrays (and |info| itself, used
    // once more in the final encode call) are accessed after unlock —
    // concurrent template destruction would race; see the TODO above.
    uint32_t templateEntryCount = info.templateEntryCount;
    VkDescriptorUpdateTemplateEntry* templateEntries = info.templateEntries;

    uint32_t imageInfoCount = info.imageInfoCount;
    uint32_t bufferInfoCount = info.bufferInfoCount;
    uint32_t bufferViewCount = info.bufferViewCount;
    uint32_t inlineUniformBlockCount = info.inlineUniformBlockCount;
    uint32_t* imageInfoIndices = info.imageInfoIndices;
    uint32_t* bufferInfoIndices = info.bufferInfoIndices;
    uint32_t* bufferViewIndices = info.bufferViewIndices;
    VkDescriptorImageInfo* imageInfos = info.imageInfos;
    VkDescriptorBufferInfo* bufferInfos = info.bufferInfos;
    VkBufferView* bufferViews = info.bufferViews;
    uint8_t* inlineUniformBlockBuffer = info.inlineUniformBlockBuffer.data();
    uint32_t* inlineUniformBlockBytesPerBlocks = info.inlineUniformBlockBytesPerBlocks.data();

    lock.unlock();

    // Write cursors into the flattened scratch arrays.
    size_t currImageInfoOffset = 0;
    size_t currBufferInfoOffset = 0;
    size_t currBufferViewOffset = 0;
    size_t inlineUniformBlockOffset = 0;
    size_t inlineUniformBlockIdx = 0;

    struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(descriptorSet);
    ReifiedDescriptorSet* reified = ds->reified;

    bool batched = mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate;

    for (uint32_t i = 0; i < templateEntryCount; ++i) {
        const auto& entry = templateEntries[i];
        VkDescriptorType descType = entry.descriptorType;
        uint32_t dstBinding = entry.dstBinding;

        auto offset = entry.offset;
        auto stride = entry.stride;
        auto dstArrayElement = entry.dstArrayElement;

        uint32_t descCount = entry.descriptorCount;

        if (isDescriptorTypeImageInfo(descType)) {
            // Per the Vulkan spec a zero stride means tightly packed.
            if (!stride) stride = sizeof(VkDescriptorImageInfo);

            const VkDescriptorImageInfo* currImageInfoBegin =
                (const VkDescriptorImageInfo*)((uint8_t*)imageInfos + currImageInfoOffset);

            for (uint32_t j = 0; j < descCount; ++j) {
                const VkDescriptorImageInfo* user =
                    (const VkDescriptorImageInfo*)(userBuffer + offset + j * stride);

                memcpy(((uint8_t*)imageInfos) + currImageInfoOffset, user,
                       sizeof(VkDescriptorImageInfo));
                currImageInfoOffset += sizeof(VkDescriptorImageInfo);
            }

            if (batched) {
                doEmulatedDescriptorImageInfoWriteFromTemplate(
                    descType, dstBinding, dstArrayElement, descCount, currImageInfoBegin, reified);
            }
        } else if (isDescriptorTypeBufferInfo(descType)) {
            if (!stride) stride = sizeof(VkDescriptorBufferInfo);

            const VkDescriptorBufferInfo* currBufferInfoBegin =
                (const VkDescriptorBufferInfo*)((uint8_t*)bufferInfos + currBufferInfoOffset);

            for (uint32_t j = 0; j < descCount; ++j) {
                const VkDescriptorBufferInfo* user =
                    (const VkDescriptorBufferInfo*)(userBuffer + offset + j * stride);

                memcpy(((uint8_t*)bufferInfos) + currBufferInfoOffset, user,
                       sizeof(VkDescriptorBufferInfo));
#if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
                // Convert mesa to internal for objects in the user buffer
                VkDescriptorBufferInfo* internalBufferInfo =
                    (VkDescriptorBufferInfo*)(((uint8_t*)bufferInfos) + currBufferInfoOffset);
                VK_FROM_HANDLE(gfxstream_vk_buffer, gfxstream_buffer, internalBufferInfo->buffer);
                internalBufferInfo->buffer = gfxstream_buffer->internal_object;
#endif
                currBufferInfoOffset += sizeof(VkDescriptorBufferInfo);
            }

            if (batched) {
                doEmulatedDescriptorBufferInfoWriteFromTemplate(
                    descType, dstBinding, dstArrayElement, descCount, currBufferInfoBegin, reified);
            }

        } else if (isDescriptorTypeBufferView(descType)) {
            if (!stride) stride = sizeof(VkBufferView);

            const VkBufferView* currBufferViewBegin =
                (const VkBufferView*)((uint8_t*)bufferViews + currBufferViewOffset);

            for (uint32_t j = 0; j < descCount; ++j) {
                const VkBufferView* user = (const VkBufferView*)(userBuffer + offset + j * stride);

                memcpy(((uint8_t*)bufferViews) + currBufferViewOffset, user, sizeof(VkBufferView));
                currBufferViewOffset += sizeof(VkBufferView);
            }

            if (batched) {
                doEmulatedDescriptorBufferViewWriteFromTemplate(
                    descType, dstBinding, dstArrayElement, descCount, currBufferViewBegin, reified);
            }
        } else if (isDescriptorTypeInlineUniformBlock(descType)) {
            // Inline uniform blocks: descriptorCount was recorded as a byte
            // size at template creation; copy that many raw bytes.
            uint32_t inlineUniformBlockBytesPerBlock =
                inlineUniformBlockBytesPerBlocks[inlineUniformBlockIdx];
            uint8_t* currInlineUniformBlockBufferBegin =
                inlineUniformBlockBuffer + inlineUniformBlockOffset;
            memcpy(currInlineUniformBlockBufferBegin, userBuffer + offset,
                   inlineUniformBlockBytesPerBlock);
            inlineUniformBlockIdx++;
            inlineUniformBlockOffset += inlineUniformBlockBytesPerBlock;

            if (batched) {
                doEmulatedDescriptorInlineUniformBlockFromTemplate(
                    descType, dstBinding, dstArrayElement, descCount,
                    currInlineUniformBlockBufferBegin, reified);
            }
        } else {
            mesa_loge("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
            abort();
        }
    }

    // Batched mode: writes were applied guest-side above; nothing to encode.
    if (batched) return;

    enc->vkUpdateDescriptorSetWithTemplateSized2GOOGLE(
        device, descriptorSet, descriptorUpdateTemplate, imageInfoCount, bufferInfoCount,
        bufferViewCount, static_cast<uint32_t>(info.inlineUniformBlockBuffer.size()),
        imageInfoIndices, bufferInfoIndices, bufferViewIndices, imageInfos, bufferInfos,
        bufferViews, inlineUniformBlockBuffer, true /* do lock */);
}
6491
Jean-Francois Thibertf884afd2024-01-31 10:15:04 -05006492void ResourceTracker::on_vkUpdateDescriptorSetWithTemplateKHR(
6493 void* context, VkDevice device, VkDescriptorSet descriptorSet,
6494 VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData) {
6495 on_vkUpdateDescriptorSetWithTemplate(context, device, descriptorSet, descriptorUpdateTemplate,
6496 pData);
6497}
6498
// Shared implementation for vkGetPhysicalDeviceImageFormatProperties2 and its
// KHR alias. Guest-side filtering happens before the host round trip:
//  - Fuchsia: only a fixed list of formats may be exported externally.
//  - External handle types requested by the app must be a subset of the
//    handle types this platform supports, else VK_ERROR_FORMAT_NOT_SUPPORTED.
// After a successful host call, platform-specific output structs are filled
// in (Fuchsia VMO memory properties, Android AHB usage) and external memory
// properties are transformed from host to guest representation.
VkResult ResourceTracker::on_vkGetPhysicalDeviceImageFormatProperties2_common(
    bool isKhr, void* context, VkResult input_result, VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
    VkImageFormatProperties2* pImageFormatProperties) {
    VkEncoder* enc = (VkEncoder*)context;
    (void)input_result;

    uint32_t supportedHandleType = 0;
    VkExternalImageFormatProperties* ext_img_properties =
        vk_find_struct<VkExternalImageFormatProperties>(pImageFormatProperties);

#ifdef VK_USE_PLATFORM_FUCHSIA

    // Formats that may back an externally-shared (VMO) image on Fuchsia.
    constexpr VkFormat kExternalImageSupportedFormats[] = {
        VK_FORMAT_B8G8R8A8_SINT, VK_FORMAT_B8G8R8A8_UNORM, VK_FORMAT_B8G8R8A8_SRGB,
        VK_FORMAT_B8G8R8A8_SNORM, VK_FORMAT_B8G8R8A8_SSCALED, VK_FORMAT_B8G8R8A8_USCALED,
        VK_FORMAT_R8G8B8A8_SINT, VK_FORMAT_R8G8B8A8_UNORM, VK_FORMAT_R8G8B8A8_SRGB,
        VK_FORMAT_R8G8B8A8_SNORM, VK_FORMAT_R8G8B8A8_SSCALED, VK_FORMAT_R8G8B8A8_USCALED,
        VK_FORMAT_R8_UNORM, VK_FORMAT_R8_UINT, VK_FORMAT_R8_USCALED,
        VK_FORMAT_R8_SNORM, VK_FORMAT_R8_SINT, VK_FORMAT_R8_SSCALED,
        VK_FORMAT_R8_SRGB, VK_FORMAT_R8G8_UNORM, VK_FORMAT_R8G8_UINT,
        VK_FORMAT_R8G8_USCALED, VK_FORMAT_R8G8_SNORM, VK_FORMAT_R8G8_SINT,
        VK_FORMAT_R8G8_SSCALED, VK_FORMAT_R8G8_SRGB,
    };

    if (ext_img_properties) {
        if (std::find(std::begin(kExternalImageSupportedFormats),
                      std::end(kExternalImageSupportedFormats),
                      pImageFormatInfo->format) == std::end(kExternalImageSupportedFormats)) {
            return VK_ERROR_FORMAT_NOT_SUPPORTED;
        }
    }
    supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
#endif

#ifdef VK_USE_PLATFORM_ANDROID_KHR
    VkAndroidHardwareBufferUsageANDROID* output_ahw_usage =
        vk_find_struct<VkAndroidHardwareBufferUsageANDROID>(pImageFormatProperties);
    supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
                           VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
#endif
    const VkPhysicalDeviceExternalImageFormatInfo* ext_img_info =
        vk_find_struct<VkPhysicalDeviceExternalImageFormatInfo>(pImageFormatInfo);
    if (supportedHandleType && ext_img_info) {
        // 0 is a valid handleType so we don't check against 0
        if (ext_img_info->handleType != (ext_img_info->handleType & supportedHandleType)) {
            return VK_ERROR_FORMAT_NOT_SUPPORTED;
        }
    }

    VkResult hostRes;

    if (isKhr) {
        hostRes = enc->vkGetPhysicalDeviceImageFormatProperties2KHR(
            physicalDevice, pImageFormatInfo, pImageFormatProperties, true /* do lock */);
    } else {
        hostRes = enc->vkGetPhysicalDeviceImageFormatProperties2(
            physicalDevice, pImageFormatInfo, pImageFormatProperties, true /* do lock */);
    }

    if (hostRes != VK_SUCCESS) return hostRes;

#ifdef VK_USE_PLATFORM_FUCHSIA
    // Fuchsia: report VMO handles as both exportable and importable.
    if (ext_img_properties) {
        if (ext_img_info) {
            if (static_cast<uint32_t>(ext_img_info->handleType) ==
                VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA) {
                ext_img_properties->externalMemoryProperties = {
                    .externalMemoryFeatures = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT |
                                              VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT,
                    .exportFromImportedHandleTypes =
                        VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA,
                    .compatibleHandleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA,
                };
            }
        }
    }
#endif

#ifdef VK_USE_PLATFORM_ANDROID_KHR
    // Android: derive the AHardwareBuffer usage bits from the Vulkan usage.
    if (output_ahw_usage) {
        output_ahw_usage->androidHardwareBufferUsage = getAndroidHardwareBufferUsageFromVkUsage(
            pImageFormatInfo->flags, pImageFormatInfo->usage);
    }
#endif
    if (ext_img_properties) {
        transformImpl_VkExternalMemoryProperties_fromhost(
            &ext_img_properties->externalMemoryProperties, 0);
    }
    return hostRes;
}
6590
6591VkResult ResourceTracker::on_vkGetPhysicalDeviceImageFormatProperties2(
6592 void* context, VkResult input_result, VkPhysicalDevice physicalDevice,
6593 const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
6594 VkImageFormatProperties2* pImageFormatProperties) {
6595 return on_vkGetPhysicalDeviceImageFormatProperties2_common(
6596 false /* not KHR */, context, input_result, physicalDevice, pImageFormatInfo,
6597 pImageFormatProperties);
6598}
6599
6600VkResult ResourceTracker::on_vkGetPhysicalDeviceImageFormatProperties2KHR(
6601 void* context, VkResult input_result, VkPhysicalDevice physicalDevice,
6602 const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
6603 VkImageFormatProperties2* pImageFormatProperties) {
6604 return on_vkGetPhysicalDeviceImageFormatProperties2_common(
6605 true /* is KHR */, context, input_result, physicalDevice, pImageFormatInfo,
6606 pImageFormatProperties);
6607}
6608
// Shared implementation for vkGetPhysicalDeviceExternalBufferProperties and
// its KHR alias. Filters out handle types the platform does not support
// (leaving the output zeroed/unmodified), special-cases AHB-backed BLOB
// buffers on old Goldfish gralloc, then queries the host and transforms the
// resulting external memory properties to the guest representation.
void ResourceTracker::on_vkGetPhysicalDeviceExternalBufferProperties_common(
    bool isKhr, void* context, VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
    VkExternalBufferProperties* pExternalBufferProperties) {
    VkEncoder* enc = (VkEncoder*)context;

#if defined(ANDROID)
    // Older versions of Goldfish's Gralloc did not support allocating AHARDWAREBUFFER_FORMAT_BLOB
    // with GPU usage (b/299520213).
    if (ResourceTracker::threadingCallbacks.hostConnectionGetFunc()
            ->grallocHelper()
            ->treatBlobAsImage() &&
        pExternalBufferInfo->handleType ==
            VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) {
        // Report no external memory support at all for this combination.
        pExternalBufferProperties->externalMemoryProperties.externalMemoryFeatures = 0;
        pExternalBufferProperties->externalMemoryProperties.exportFromImportedHandleTypes = 0;
        pExternalBufferProperties->externalMemoryProperties.compatibleHandleTypes = 0;
        return;
    }
#endif

    // Handle types this platform's guest driver can service.
    uint32_t supportedHandleType = 0;
#ifdef VK_USE_PLATFORM_FUCHSIA
    supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
#endif
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
                           VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
#endif
    if (supportedHandleType) {
        // 0 is a valid handleType so we can't check against 0
        if (pExternalBufferInfo->handleType !=
            (pExternalBufferInfo->handleType & supportedHandleType)) {
            // Unsupported handle type: leave the output untouched.
            return;
        }
    }

    if (isKhr) {
        enc->vkGetPhysicalDeviceExternalBufferPropertiesKHR(
            physicalDevice, pExternalBufferInfo, pExternalBufferProperties, true /* do lock */);
    } else {
        enc->vkGetPhysicalDeviceExternalBufferProperties(
            physicalDevice, pExternalBufferInfo, pExternalBufferProperties, true /* do lock */);
    }
    // Rewrite host-side external memory properties into guest terms.
    transformImpl_VkExternalMemoryProperties_fromhost(
        &pExternalBufferProperties->externalMemoryProperties, 0);
}
6656
6657void ResourceTracker::on_vkGetPhysicalDeviceExternalBufferProperties(
6658 void* context, VkPhysicalDevice physicalDevice,
6659 const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
6660 VkExternalBufferProperties* pExternalBufferProperties) {
6661 return on_vkGetPhysicalDeviceExternalBufferProperties_common(
6662 false /* not KHR */, context, physicalDevice, pExternalBufferInfo,
Gurchetan Singhc4444b82023-09-19 08:06:20 -07006663 pExternalBufferProperties);
Andrew Woloszyn416d0a12023-10-04 17:02:19 -04006664}
6665
6666void ResourceTracker::on_vkGetPhysicalDeviceExternalBufferPropertiesKHR(
6667 void* context, VkPhysicalDevice physicalDevice,
6668 const VkPhysicalDeviceExternalBufferInfoKHR* pExternalBufferInfo,
6669 VkExternalBufferPropertiesKHR* pExternalBufferProperties) {
6670 return on_vkGetPhysicalDeviceExternalBufferProperties_common(
Gurchetan Singhc4444b82023-09-19 08:06:20 -07006671 true /* is KHR */, context, physicalDevice, pExternalBufferInfo, pExternalBufferProperties);
Andrew Woloszyn416d0a12023-10-04 17:02:19 -04006672}
6673
// Reports external semaphore capabilities. On Fuchsia, Zircon-event handles
// are export/import capable. Elsewhere, sync-fd handles are export/import
// capable for binary semaphores only; timeline semaphores report no external
// support (b/304373623). Assumes the caller zero-/default-initialized
// pExternalSemaphoreProperties, since unsupported cases leave it unmodified.
void ResourceTracker::on_vkGetPhysicalDeviceExternalSemaphoreProperties(
    void*, VkPhysicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
    VkExternalSemaphoreProperties* pExternalSemaphoreProperties) {
    (void)pExternalSemaphoreInfo;
    (void)pExternalSemaphoreProperties;
#ifdef VK_USE_PLATFORM_FUCHSIA
    if (pExternalSemaphoreInfo->handleType ==
        static_cast<uint32_t>(VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA)) {
        pExternalSemaphoreProperties->compatibleHandleTypes |=
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
        pExternalSemaphoreProperties->exportFromImportedHandleTypes |=
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
        pExternalSemaphoreProperties->externalSemaphoreFeatures |=
            VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
    }
#else
    const VkSemaphoreTypeCreateInfo* semaphoreTypeCi =
        vk_find_struct<VkSemaphoreTypeCreateInfo>(pExternalSemaphoreInfo);
    bool isSemaphoreTimeline =
        semaphoreTypeCi != nullptr && semaphoreTypeCi->semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE;
    if (isSemaphoreTimeline) {
        // b/304373623
        // dEQP-VK.api.external.semaphore.sync_fd#info_timeline
        pExternalSemaphoreProperties->compatibleHandleTypes = 0;
        pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
        pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
    } else if (pExternalSemaphoreInfo->handleType ==
               VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
        pExternalSemaphoreProperties->compatibleHandleTypes |=
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
        pExternalSemaphoreProperties->exportFromImportedHandleTypes |=
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
        pExternalSemaphoreProperties->externalSemaphoreFeatures |=
            VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
    }
#endif  // VK_USE_PLATFORM_FUCHSIA
}
6713
6714void ResourceTracker::on_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(
6715 void* context, VkPhysicalDevice physicalDevice,
6716 const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
6717 VkExternalSemaphoreProperties* pExternalSemaphoreProperties) {
6718 on_vkGetPhysicalDeviceExternalSemaphoreProperties(
6719 context, physicalDevice, pExternalSemaphoreInfo, pExternalSemaphoreProperties);
6720}
6721
6722void ResourceTracker::registerEncoderCleanupCallback(const VkEncoder* encoder, void* object,
6723 CleanupCallback callback) {
6724 AutoLock<RecursiveLock> lock(mLock);
6725 auto& callbacks = mEncoderCleanupCallbacks[encoder];
6726 callbacks[object] = callback;
6727}
6728
6729void ResourceTracker::unregisterEncoderCleanupCallback(const VkEncoder* encoder, void* object) {
6730 AutoLock<RecursiveLock> lock(mLock);
6731 mEncoderCleanupCallbacks[encoder].erase(object);
6732}
6733
6734void ResourceTracker::onEncoderDeleted(const VkEncoder* encoder) {
6735 AutoLock<RecursiveLock> lock(mLock);
6736 if (mEncoderCleanupCallbacks.find(encoder) == mEncoderCleanupCallbacks.end()) return;
6737
6738 std::unordered_map<void*, CleanupCallback> callbackCopies = mEncoderCleanupCallbacks[encoder];
6739
6740 mEncoderCleanupCallbacks.erase(encoder);
6741 lock.unlock();
6742
6743 for (auto it : callbackCopies) {
6744 it.second();
6745 }
6746}
6747
// Returns the allocator hook used by CommandBufferStagingStream when the host
// supports auxiliary command memory; returns nullptr otherwise (the stream
// then falls back to its default allocation path — TODO confirm against
// CommandBufferStagingStream). The returned lambda allocates host-visible
// coherent memory via getCoherentMemory and hands back the suballocation's
// alias VkDeviceMemory plus its mapped guest pointer.
CommandBufferStagingStream::Alloc ResourceTracker::getAlloc() {
    if (mFeatureInfo->hasVulkanAuxCommandMemory) {
        return [this](size_t size) -> CommandBufferStagingStream::Memory {
            // memoryTypeIndex == VK_MAX_MEMORY_TYPES is a sentinel telling
            // getCoherentMemory to use the auxiliary memory path.
            VkMemoryAllocateInfo info{
                .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
                .pNext = nullptr,
                .allocationSize = size,
                .memoryTypeIndex = VK_MAX_MEMORY_TYPES // indicates auxiliary memory
            };

            auto enc = ResourceTracker::getThreadLocalEncoder();
            VkDevice device = VK_NULL_HANDLE;
            VkDeviceMemory vkDeviceMem = VK_NULL_HANDLE;
            VkResult result = getCoherentMemory(&info, enc, device, &vkDeviceMem);
            if (result != VK_SUCCESS) {
                mesa_loge("Failed to get coherent memory %u", result);
                return {.deviceMemory = VK_NULL_HANDLE, .ptr = nullptr};
            }

            // getCoherentMemory() uses suballocations.
            // To retrieve the suballocated memory address, look up
            // VkDeviceMemory filled in by getCoherentMemory()
            // scope of mLock
            {
                AutoLock<RecursiveLock> lock(mLock);
                const auto it = info_VkDeviceMemory.find(vkDeviceMem);
                if (it == info_VkDeviceMemory.end()) {
                    mesa_loge("Coherent memory allocated %u not found", result);
                    return {.deviceMemory = VK_NULL_HANDLE, .ptr = nullptr};
                };

                const auto& info = it->second;
                // Return the alias handle plus the mapped pointer of the
                // suballocated region.
                return {.deviceMemory = vkDeviceMem, .ptr = info.ptr};
            }
        };
    }
    return nullptr;
}
Shalini Sdb704c92023-01-27 21:35:33 +00006786
// Returns the deallocator hook paired with getAlloc() when auxiliary command
// memory is supported; nullptr otherwise. The returned lambda releases the
// coherent-memory suballocation backing |memory|.
CommandBufferStagingStream::Free ResourceTracker::getFree() {
    if (mFeatureInfo->hasVulkanAuxCommandMemory) {
        return [this](const CommandBufferStagingStream::Memory& memory) {
            // |deviceMemory| may not be the actual backing auxiliary
            // VkDeviceMemory: for suballocations it is an alias handle, and
            // freeCoherentMemoryLocked maps the alias back to the backing
            // VkDeviceMemory.
            VkDeviceMemory deviceMemory = memory.deviceMemory;
            AutoLock<RecursiveLock> lock(mLock);
            auto it = info_VkDeviceMemory.find(deviceMemory);
            if (it == info_VkDeviceMemory.end()) {
                mesa_loge("Device memory to free not found");
                return;
            }
            auto coherentMemory = freeCoherentMemoryLocked(deviceMemory, it->second);
            // We have to release the lock before we could possibly free a
            // CoherentMemory, because that will call into VkEncoder, which
            // shouldn't be called when the lock is held.
            lock.unlock();
            coherentMemory = nullptr;
        };
    }
    return nullptr;
}
Shalini Sdb704c92023-01-27 21:35:33 +00006810
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07006811VkResult ResourceTracker::on_vkBeginCommandBuffer(void* context, VkResult input_result,
6812 VkCommandBuffer commandBuffer,
6813 const VkCommandBufferBeginInfo* pBeginInfo) {
6814 (void)context;
6815
6816 resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */,
6817 true /* also clear pending descriptor sets */);
6818
6819 VkEncoder* enc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
6820 (void)input_result;
6821
6822 struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
6823 cb->flags = pBeginInfo->flags;
6824
6825 VkCommandBufferBeginInfo modifiedBeginInfo;
6826
6827 if (pBeginInfo->pInheritanceInfo && !cb->isSecondary) {
6828 modifiedBeginInfo = *pBeginInfo;
6829 modifiedBeginInfo.pInheritanceInfo = nullptr;
6830 pBeginInfo = &modifiedBeginInfo;
Shalini Sdb704c92023-01-27 21:35:33 +00006831 }
6832
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07006833 if (!supportsDeferredCommands()) {
6834 return enc->vkBeginCommandBuffer(commandBuffer, pBeginInfo, true /* do lock */);
Shalini Sdb704c92023-01-27 21:35:33 +00006835 }
6836
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07006837 enc->vkBeginCommandBufferAsyncGOOGLE(commandBuffer, pBeginInfo, true /* do lock */);
Lingfeng Yangc53e7472019-03-27 08:50:55 -07006838
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07006839 return VK_SUCCESS;
6840}
Lingfeng Yang967f9af2021-01-22 17:56:24 -08006841
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07006842VkResult ResourceTracker::on_vkEndCommandBuffer(void* context, VkResult input_result,
6843 VkCommandBuffer commandBuffer) {
6844 VkEncoder* enc = (VkEncoder*)context;
6845 (void)input_result;
Lingfeng Yang967f9af2021-01-22 17:56:24 -08006846
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07006847 if (!supportsDeferredCommands()) {
6848 return enc->vkEndCommandBuffer(commandBuffer, true /* do lock */);
Lingfeng Yangc53e7472019-03-27 08:50:55 -07006849 }
6850
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07006851 enc->vkEndCommandBufferAsyncGOOGLE(commandBuffer, true /* do lock */);
Lingfeng Yangc53e7472019-03-27 08:50:55 -07006852
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07006853 return VK_SUCCESS;
6854}
Lingfeng Yangc53e7472019-03-27 08:50:55 -07006855
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07006856VkResult ResourceTracker::on_vkResetCommandBuffer(void* context, VkResult input_result,
6857 VkCommandBuffer commandBuffer,
6858 VkCommandBufferResetFlags flags) {
Jean-Francois Thiberted8433a2024-05-07 15:14:55 -04006859 VkEncoder* enc = (VkEncoder*)context;
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07006860 (void)input_result;
Lingfeng Yang39a276e2019-06-17 13:27:22 -07006861
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07006862 if (!supportsDeferredCommands()) {
Jean-Francois Thiberted8433a2024-05-07 15:14:55 -04006863 VkResult res = enc->vkResetCommandBuffer(commandBuffer, flags, true /* do lock */);
6864 resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */,
6865 true /* also clear pending descriptor sets */);
6866 return res;
Lingfeng Yangc53e7472019-03-27 08:50:55 -07006867 }
6868
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07006869 enc->vkResetCommandBufferAsyncGOOGLE(commandBuffer, flags, true /* do lock */);
Jean-Francois Thiberted8433a2024-05-07 15:14:55 -04006870 resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */,
6871 true /* also clear pending descriptor sets */);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07006872 return VK_SUCCESS;
6873}
Lingfeng Yangc53e7472019-03-27 08:50:55 -07006874
// Creates an image view, working on a local copy of the create info so the
// format and pNext chain can be patched (Android external formats) before
// forwarding to the host encoder.
VkResult ResourceTracker::on_vkCreateImageView(void* context, VkResult input_result,
                                               VkDevice device,
                                               const VkImageViewCreateInfo* pCreateInfo,
                                               const VkAllocationCallbacks* pAllocator,
                                               VkImageView* pView) {
    VkEncoder* enc = (VkEncoder*)context;
    (void)input_result;

    // Orphan copy: detached pNext chain; only explicitly appended structs are
    // forwarded to the host.
    VkImageViewCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
    vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);

#if defined(VK_USE_PLATFORM_ANDROID_KHR)
    // VK_FORMAT_UNDEFINED: substitute the concrete format derived from the
    // fourcc recorded in the image's info (external/AHB format case).
    if (pCreateInfo->format == VK_FORMAT_UNDEFINED) {
        AutoLock<RecursiveLock> lock(mLock);

        auto it = info_VkImage.find(pCreateInfo->image);
        if (it != info_VkImage.end() && it->second.hasExternalFormat) {
            localCreateInfo.format = vk_format_from_fourcc(it->second.externalFourccFormat);
        }
    }
    // Forward sampler Ycbcr conversion info only when a real conversion is
    // attached (not the DO_NOTHING sentinel).
    VkSamplerYcbcrConversionInfo localVkSamplerYcbcrConversionInfo;
    const VkSamplerYcbcrConversionInfo* samplerYcbcrConversionInfo =
        vk_find_struct<VkSamplerYcbcrConversionInfo>(pCreateInfo);
    if (samplerYcbcrConversionInfo) {
        if (samplerYcbcrConversionInfo->conversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
            localVkSamplerYcbcrConversionInfo = vk_make_orphan_copy(*samplerYcbcrConversionInfo);
            vk_append_struct(&structChainIter, &localVkSamplerYcbcrConversionInfo);
        }
    }
#endif

    return enc->vkCreateImageView(device, &localCreateInfo, pAllocator, pView, true /* do lock */);
}
6908
// Records secondary command buffers into a primary. When queue-submit-with-
// commands is enabled, also links primary <-> secondary tracking objects so
// later staging resets can walk the relationship graph.
void ResourceTracker::on_vkCmdExecuteCommands(void* context, VkCommandBuffer commandBuffer,
                                              uint32_t commandBufferCount,
                                              const VkCommandBuffer* pCommandBuffers) {
    VkEncoder* enc = (VkEncoder*)context;

    if (mFeatureInfo->hasVulkanQueueSubmitWithCommands) {
        struct goldfish_VkCommandBuffer* primary = as_goldfish_VkCommandBuffer(commandBuffer);
        for (uint32_t idx = 0; idx < commandBufferCount; ++idx) {
            struct goldfish_VkCommandBuffer* secondary =
                as_goldfish_VkCommandBuffer(pCommandBuffers[idx]);
            // Bidirectional links: secondary knows its primaries, primary
            // knows its secondaries.
            appendObject(&secondary->superObjects, primary);
            appendObject(&primary->subObjects, secondary);
        }
    }

    enc->vkCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers,
                              true /* do lock */);
}
Lingfeng Yangf8cdd8b2021-02-08 11:32:45 -08006931
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07006932void ResourceTracker::on_vkCmdBindDescriptorSets(void* context, VkCommandBuffer commandBuffer,
6933 VkPipelineBindPoint pipelineBindPoint,
6934 VkPipelineLayout layout, uint32_t firstSet,
6935 uint32_t descriptorSetCount,
6936 const VkDescriptorSet* pDescriptorSets,
6937 uint32_t dynamicOffsetCount,
6938 const uint32_t* pDynamicOffsets) {
6939 VkEncoder* enc = (VkEncoder*)context;
Lingfeng Yangf8cdd8b2021-02-08 11:32:45 -08006940
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07006941 if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate)
6942 addPendingDescriptorSets(commandBuffer, descriptorSetCount, pDescriptorSets);
Lingfeng Yangf8cdd8b2021-02-08 11:32:45 -08006943
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07006944 enc->vkCmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet,
6945 descriptorSetCount, pDescriptorSets, dynamicOffsetCount,
6946 pDynamicOffsets, true /* do lock */);
6947}
Lingfeng Yangf8cdd8b2021-02-08 11:32:45 -08006948
// Forwards vkCmdPipelineBarrier to the host after rewriting image barriers on
// Android to avoid unintentionally discarding AHardwareBuffer contents (see
// comment in the loop below).
void ResourceTracker::on_vkCmdPipelineBarrier(
    void* context, VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
    VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
    uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
    uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers,
    uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers) {
    VkEncoder* enc = (VkEncoder*)context;

    // Copy the image barriers so individual entries can be patched.
    std::vector<VkImageMemoryBarrier> updatedImageMemoryBarriers;
    updatedImageMemoryBarriers.reserve(imageMemoryBarrierCount);
    for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
        VkImageMemoryBarrier barrier = pImageMemoryBarriers[i];

#ifdef VK_USE_PLATFORM_ANDROID_KHR
        // Unfortunately, Android does not yet have a mechanism for sharing the expected
        // VkImageLayout when passing around AHardwareBuffer-s so many existing users
        // that import AHardwareBuffer-s into VkImage-s/VkDeviceMemory-s simply use
        // VK_IMAGE_LAYOUT_UNDEFINED. However, the Vulkan spec's image layout transition
        // sections says "If the old layout is VK_IMAGE_LAYOUT_UNDEFINED, the contents of
        // that range may be discarded." Some Vulkan drivers have been observed to actually
        // perform the discard which leads to AHardwareBuffer-s being unintentionally
        // cleared. See go/ahb-vkimagelayout for more information.
        if (barrier.srcQueueFamilyIndex != barrier.dstQueueFamilyIndex &&
            (barrier.srcQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
             barrier.srcQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT) &&
            barrier.oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
            // This is not a complete solution as the Vulkan spec does not require that
            // Vulkan drivers perform a no-op in the case when oldLayout equals newLayout
            // but this has been observed to be enough to work for now to avoid clearing
            // out images.
            // TODO(b/236179843): figure out long term solution.
            barrier.oldLayout = barrier.newLayout;
        }
#endif

        updatedImageMemoryBarriers.push_back(barrier);
    }

    enc->vkCmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
                              memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                              pBufferMemoryBarriers, updatedImageMemoryBarriers.size(),
                              updatedImageMemoryBarriers.data(), true /* do lock */);
}
Lingfeng Yanga4ae0522021-02-17 14:12:19 -08006992
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07006993void ResourceTracker::on_vkDestroyDescriptorSetLayout(void* context, VkDevice device,
6994 VkDescriptorSetLayout descriptorSetLayout,
6995 const VkAllocationCallbacks* pAllocator) {
6996 decDescriptorSetLayoutRef(context, device, descriptorSetLayout, pAllocator);
6997}
Lingfeng Yanga4ae0522021-02-17 14:12:19 -08006998
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07006999VkResult ResourceTracker::on_vkAllocateCommandBuffers(
7000 void* context, VkResult input_result, VkDevice device,
7001 const VkCommandBufferAllocateInfo* pAllocateInfo, VkCommandBuffer* pCommandBuffers) {
7002 (void)input_result;
Lingfeng Yanga4ae0522021-02-17 14:12:19 -08007003
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07007004 VkEncoder* enc = (VkEncoder*)context;
7005 VkResult res =
7006 enc->vkAllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers, true /* do lock */);
7007 if (VK_SUCCESS != res) return res;
7008
7009 for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; ++i) {
7010 struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(pCommandBuffers[i]);
7011 cb->isSecondary = pAllocateInfo->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY;
7012 cb->device = device;
Lingfeng Yanga4ae0522021-02-17 14:12:19 -08007013 }
7014
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07007015 return res;
7016}
Lingfeng Yang97f51af2021-04-22 12:24:11 -07007017
Gurchetan Singh72a67582022-09-09 13:52:16 -07007018#if defined(VK_USE_PLATFORM_ANDROID_KHR)
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07007019VkResult ResourceTracker::exportSyncFdForQSRILocked(VkImage image, int* fd) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07007020 mesa_logi("%s: call for image %p hos timage handle 0x%llx\n", __func__, (void*)image,
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07007021 (unsigned long long)get_host_u64_VkImage(image));
Gurchetan Singh72a67582022-09-09 13:52:16 -07007022
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07007023 if (mFeatureInfo->hasVirtioGpuNativeSync) {
7024 struct VirtGpuExecBuffer exec = {};
7025 struct gfxstreamCreateQSRIExportVK exportQSRI = {};
7026 VirtGpuDevice* instance = VirtGpuDevice::getInstance();
Gurchetan Singh72a67582022-09-09 13:52:16 -07007027
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07007028 uint64_t hostImageHandle = get_host_u64_VkImage(image);
Gurchetan Singh72a67582022-09-09 13:52:16 -07007029
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07007030 exportQSRI.hdr.opCode = GFXSTREAM_CREATE_QSRI_EXPORT_VK;
7031 exportQSRI.imageHandleLo = (uint32_t)hostImageHandle;
7032 exportQSRI.imageHandleHi = (uint32_t)(hostImageHandle >> 32);
Lingfeng Yang7efc8572021-07-13 16:30:10 -07007033
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07007034 exec.command = static_cast<void*>(&exportQSRI);
7035 exec.command_size = sizeof(exportQSRI);
7036 exec.flags = kFenceOut | kRingIdx;
7037 if (instance->execBuffer(exec, nullptr)) return VK_ERROR_OUT_OF_HOST_MEMORY;
Lingfeng Yang7efc8572021-07-13 16:30:10 -07007038
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07007039 *fd = exec.handle.osHandle;
7040 } else {
Gurchetan Singhb7feebd2024-01-23 14:12:36 -08007041#if GFXSTREAM_ENABLE_GUEST_GOLDFISH
Yahan Zhou8d5457f2023-10-04 15:48:27 -07007042 ensureSyncDeviceFd();
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07007043 goldfish_sync_queue_work(
7044 mSyncDeviceFd, get_host_u64_VkImage(image) /* the handle */,
7045 GOLDFISH_SYNC_VULKAN_QSRI /* thread handle (doubling as type field) */, fd);
Gurchetan Singhb7feebd2024-01-23 14:12:36 -08007046#endif
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07007047 }
Lingfeng Yang7efc8572021-07-13 16:30:10 -07007048
Gurchetan Singh42361f72024-05-16 17:37:11 -07007049 mesa_logi("%s: got fd: %d\n", __func__, *fd);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07007050 auto imageInfoIt = info_VkImage.find(image);
7051 if (imageInfoIt != info_VkImage.end()) {
7052 auto& imageInfo = imageInfoIt->second;
7053
7054 auto* syncHelper =
7055 ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
7056
7057 // Remove any pending QSRI sync fds that are already signaled.
7058 auto syncFdIt = imageInfo.pendingQsriSyncFds.begin();
7059 while (syncFdIt != imageInfo.pendingQsriSyncFds.end()) {
7060 int syncFd = *syncFdIt;
7061 int syncWaitRet = syncHelper->wait(syncFd, /*timeout msecs*/ 0);
7062 if (syncWaitRet == 0) {
7063 // Sync fd is signaled.
7064 syncFdIt = imageInfo.pendingQsriSyncFds.erase(syncFdIt);
7065 syncHelper->close(syncFd);
7066 } else {
7067 if (errno != ETIME) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07007068 mesa_loge("%s: Failed to wait for pending QSRI sync: sterror: %s errno: %d",
7069 __func__, strerror(errno), errno);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07007070 }
7071 break;
7072 }
Lingfeng Yang7efc8572021-07-13 16:30:10 -07007073 }
Jason Macnak119ec5d2022-06-23 16:18:33 -07007074
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07007075 int syncFdDup = syncHelper->dup(*fd);
7076 if (syncFdDup < 0) {
Gurchetan Singh42361f72024-05-16 17:37:11 -07007077 mesa_loge("%s: Failed to dup() QSRI sync fd : sterror: %s errno: %d", __func__,
7078 strerror(errno), errno);
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07007079 } else {
7080 imageInfo.pendingQsriSyncFds.push_back(syncFdDup);
7081 }
7082 }
Jason Macnak119ec5d2022-06-23 16:18:33 -07007083
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07007084 return VK_SUCCESS;
7085}
Jason Macnaka194bbf2023-07-20 10:21:13 -07007086
// Implements vkQueueSignalReleaseImageANDROID. Without async QSRI support,
// forwards synchronously to the host. With it, validates the image, issues
// the async GOOGLE variant, and synthesizes the native fence fd on the guest
// side via exportSyncFdForQSRILocked().
VkResult ResourceTracker::on_vkQueueSignalReleaseImageANDROID(void* context, VkResult input_result,
                                                              VkQueue queue,
                                                              uint32_t waitSemaphoreCount,
                                                              const VkSemaphore* pWaitSemaphores,
                                                              VkImage image, int* pNativeFenceFd) {
    (void)input_result;

    VkEncoder* enc = (VkEncoder*)context;

    // Legacy path: synchronous host call returns the fence fd directly.
    if (!mFeatureInfo->hasVulkanAsyncQsri) {
        return enc->vkQueueSignalReleaseImageANDROID(queue, waitSemaphoreCount, pWaitSemaphores,
                                                     image, pNativeFenceFd, true /* lock */);
    }

    // Validate the image handle is known before issuing the async host call.
    {
        AutoLock<RecursiveLock> lock(mLock);
        auto it = info_VkImage.find(image);
        if (it == info_VkImage.end()) {
            if (pNativeFenceFd) *pNativeFenceFd = -1;
            return VK_ERROR_INITIALIZATION_FAILED;
        }
    }

    // mLock is intentionally released across the encoder call and re-acquired
    // below for the export (which requires the lock to be held).
    enc->vkQueueSignalReleaseImageANDROIDAsyncGOOGLE(queue, waitSemaphoreCount, pWaitSemaphores,
                                                     image, true /* lock */);

    AutoLock<RecursiveLock> lock(mLock);
    VkResult result;
    if (pNativeFenceFd) {
        result = exportSyncFdForQSRILocked(image, pNativeFenceFd);
    } else {
        // Caller did not request a fence: still perform the export (it updates
        // the image's pending-QSRI bookkeeping) and close the fd ourselves.
        int syncFd;
        result = exportSyncFdForQSRILocked(image, &syncFd);

        if (syncFd >= 0) {
            auto* syncHelper =
                ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
            syncHelper->close(syncFd);
        }
    }

    return result;
}
7130#endif
7131
// Creates graphics pipelines, first sanitizing each create info: pointers
// that the Vulkan spec requires to be ignored (viewport, multisample,
// depth-stencil, color-blend state under various conditions) are nulled out
// so the host never dereferences stale/garbage pointers supplied by apps.
VkResult ResourceTracker::on_vkCreateGraphicsPipelines(
    void* context, VkResult input_result, VkDevice device, VkPipelineCache pipelineCache,
    uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos,
    const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines) {
    (void)input_result;
    VkEncoder* enc = (VkEncoder*)context;
    // Shallow copies we are allowed to mutate before encoding.
    std::vector<VkGraphicsPipelineCreateInfo> localCreateInfos(pCreateInfos,
                                                               pCreateInfos + createInfoCount);
    for (VkGraphicsPipelineCreateInfo& graphicsPipelineCreateInfo : localCreateInfos) {
        // dEQP-VK.api.pipeline.pipeline_invalid_pointers_unused_structs#graphics
        bool requireViewportState = false;
        // VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00750
        requireViewportState |=
            graphicsPipelineCreateInfo.pRasterizationState != nullptr &&
            graphicsPipelineCreateInfo.pRasterizationState->rasterizerDiscardEnable == VK_FALSE;
        // VUID-VkGraphicsPipelineCreateInfo-pViewportState-04892
#ifdef VK_EXT_extended_dynamic_state2
        // Rasterizer discard may be dynamic; if so, viewport state must stay.
        if (!requireViewportState && graphicsPipelineCreateInfo.pDynamicState) {
            for (uint32_t i = 0; i < graphicsPipelineCreateInfo.pDynamicState->dynamicStateCount;
                 i++) {
                if (VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT ==
                    graphicsPipelineCreateInfo.pDynamicState->pDynamicStates[i]) {
                    requireViewportState = true;
                    break;
                }
            }
        }
#endif  // VK_EXT_extended_dynamic_state2
        if (!requireViewportState) {
            graphicsPipelineCreateInfo.pViewportState = nullptr;
        }

        // It has the same requirement as for pViewportState.
        bool shouldIncludeFragmentShaderState = requireViewportState;

        // VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00751
        if (!shouldIncludeFragmentShaderState) {
            graphicsPipelineCreateInfo.pMultisampleState = nullptr;
        }

        // Dynamic rendering (VkPipelineRenderingCreateInfo) can force
        // depth-stencil / color-blend state to remain valid even without a
        // render pass.
        bool forceDepthStencilState = false;
        bool forceColorBlendState = false;

        const VkPipelineRenderingCreateInfo* pipelineRenderingInfo =
            vk_find_struct<VkPipelineRenderingCreateInfo>(&graphicsPipelineCreateInfo);

        if (pipelineRenderingInfo) {
            forceDepthStencilState |=
                pipelineRenderingInfo->depthAttachmentFormat != VK_FORMAT_UNDEFINED;
            forceDepthStencilState |=
                pipelineRenderingInfo->stencilAttachmentFormat != VK_FORMAT_UNDEFINED;
            forceColorBlendState |= pipelineRenderingInfo->colorAttachmentCount != 0;
        }

        // VUID-VkGraphicsPipelineCreateInfo-renderPass-06043
        // VUID-VkGraphicsPipelineCreateInfo-renderPass-06044
        if (graphicsPipelineCreateInfo.renderPass == VK_NULL_HANDLE ||
            !shouldIncludeFragmentShaderState) {
            // VUID-VkGraphicsPipelineCreateInfo-renderPass-06053
            if (!forceDepthStencilState) {
                graphicsPipelineCreateInfo.pDepthStencilState = nullptr;
            }
            if (!forceColorBlendState) {
                graphicsPipelineCreateInfo.pColorBlendState = nullptr;
            }
        }
    }
    return enc->vkCreateGraphicsPipelines(device, pipelineCache, localCreateInfos.size(),
                                          localCreateInfos.data(), pAllocator, pPipelines,
                                          true /* do lock */);
}
7203
// Returns the highest Vulkan API version recorded for |instance|, or
// kDefaultApiVersion if the instance is unknown.
uint32_t ResourceTracker::getApiVersionFromInstance(VkInstance instance) const {
    AutoLock<RecursiveLock> lock(mLock);

    auto infoIt = info_VkInstance.find(instance);
    if (infoIt == info_VkInstance.end()) {
        return kDefaultApiVersion;
    }
    return infoIt->second.highestApiVersion;
}
7215
7216uint32_t ResourceTracker::getApiVersionFromDevice(VkDevice device) const {
7217 AutoLock<RecursiveLock> lock(mLock);
7218
7219 uint32_t api = kDefaultApiVersion;
7220
7221 auto it = info_VkDevice.find(device);
7222 if (it == info_VkDevice.end()) return api;
7223
7224 api = it->second.apiVersion;
7225
7226 return api;
7227}
7228
7229bool ResourceTracker::hasInstanceExtension(VkInstance instance, const std::string& name) const {
7230 AutoLock<RecursiveLock> lock(mLock);
7231
7232 auto it = info_VkInstance.find(instance);
7233 if (it == info_VkInstance.end()) return false;
7234
7235 return it->second.enabledExtensions.find(name) != it->second.enabledExtensions.end();
7236}
7237
7238bool ResourceTracker::hasDeviceExtension(VkDevice device, const std::string& name) const {
7239 AutoLock<RecursiveLock> lock(mLock);
7240
7241 auto it = info_VkDevice.find(device);
7242 if (it == info_VkDevice.end()) return false;
7243
7244 return it->second.enabledExtensions.find(name) != it->second.enabledExtensions.end();
7245}
7246
7247VkDevice ResourceTracker::getDevice(VkCommandBuffer commandBuffer) const {
7248 struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
7249 if (!cb) {
7250 return nullptr;
7251 }
7252 return cb->device;
7253}
7254
7255// Resets staging stream for this command buffer and primary command buffers
7256// where this command buffer has been recorded. If requested, also clears the pending
7257// descriptor sets.
7258void ResourceTracker::resetCommandBufferStagingInfo(VkCommandBuffer commandBuffer,
7259 bool alsoResetPrimaries,
7260 bool alsoClearPendingDescriptorSets) {
7261 struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
7262 if (!cb) {
7263 return;
7264 }
7265 if (cb->privateEncoder) {
7266 sStaging.pushStaging((CommandBufferStagingStream*)cb->privateStream, cb->privateEncoder);
7267 cb->privateEncoder = nullptr;
7268 cb->privateStream = nullptr;
Lingfeng Yang7efc8572021-07-13 16:30:10 -07007269 }
7270
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07007271 if (alsoClearPendingDescriptorSets && cb->userPtr) {
7272 CommandBufferPendingDescriptorSets* pendingSets =
7273 (CommandBufferPendingDescriptorSets*)cb->userPtr;
7274 pendingSets->sets.clear();
7275 }
Lingfeng Yang7efc8572021-07-13 16:30:10 -07007276
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07007277 if (alsoResetPrimaries) {
7278 forAllObjects(cb->superObjects, [this, alsoResetPrimaries,
7279 alsoClearPendingDescriptorSets](void* obj) {
7280 VkCommandBuffer superCommandBuffer = (VkCommandBuffer)obj;
7281 struct goldfish_VkCommandBuffer* superCb =
7282 as_goldfish_VkCommandBuffer(superCommandBuffer);
7283 this->resetCommandBufferStagingInfo(superCommandBuffer, alsoResetPrimaries,
7284 alsoClearPendingDescriptorSets);
7285 });
7286 eraseObjects(&cb->superObjects);
7287 }
Lingfeng Yang7efc8572021-07-13 16:30:10 -07007288
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07007289 forAllObjects(cb->subObjects, [cb](void* obj) {
7290 VkCommandBuffer subCommandBuffer = (VkCommandBuffer)obj;
7291 struct goldfish_VkCommandBuffer* subCb = as_goldfish_VkCommandBuffer(subCommandBuffer);
7292 // We don't do resetCommandBufferStagingInfo(subCommandBuffer)
7293 // since the user still might have submittable stuff pending there.
7294 eraseObject(&subCb->superObjects, (void*)cb);
7295 });
Lingfeng Yang7efc8572021-07-13 16:30:10 -07007296
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07007297 eraseObjects(&cb->subObjects);
7298}
Lingfeng Yang7efc8572021-07-13 16:30:10 -07007299
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07007300// Unlike resetCommandBufferStagingInfo, this does not always erase its
7301// superObjects pointers because the command buffer has merely been
7302// submitted, not reset. However, if the command buffer was recorded with
7303// ONE_TIME_SUBMIT_BIT, then it will also reset its primaries.
7304//
7305// Also, we save the set of descriptor sets referenced by this command
7306// buffer because we only submitted the command buffer and it's possible to
7307// update the descriptor set again and re-submit the same command without
7308// recording it (Update-after-bind descriptor sets)
7309void ResourceTracker::resetCommandBufferPendingTopology(VkCommandBuffer commandBuffer) {
7310 struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
7311 if (cb->flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) {
7312 resetCommandBufferStagingInfo(commandBuffer, true /* reset primaries */,
7313 true /* clear pending descriptor sets */);
7314 } else {
7315 resetCommandBufferStagingInfo(commandBuffer, false /* Don't reset primaries */,
7316 false /* Don't clear pending descriptor sets */);
7317 }
7318}
7319
7320void ResourceTracker::resetCommandPoolStagingInfo(VkCommandPool commandPool) {
7321 struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);
7322
7323 if (!p) return;
7324
7325 forAllObjects(p->subObjects, [this](void* commandBuffer) {
7326 this->resetCommandBufferStagingInfo((VkCommandBuffer)commandBuffer,
7327 true /* also reset primaries */,
7328 true /* also clear pending descriptor sets */);
7329 });
7330}
7331
// Links newly allocated command buffers to their pool (and vice versa) so
// pool-level resets/clears can find them.
void ResourceTracker::addToCommandPool(VkCommandPool commandPool, uint32_t commandBufferCount,
                                       VkCommandBuffer* pCommandBuffers) {
    // The pool handle is loop-invariant; resolve it once instead of per
    // iteration.
    struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);
    for (uint32_t i = 0; i < commandBufferCount; ++i) {
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(pCommandBuffers[i]);
        appendObject(&p->subObjects, (void*)(pCommandBuffers[i]));
        appendObject(&cb->poolObjects, (void*)commandPool);
    }
}
7341
7342void ResourceTracker::clearCommandPool(VkCommandPool commandPool) {
7343 resetCommandPoolStagingInfo(commandPool);
7344 struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);
7345 forAllObjects(p->subObjects, [this](void* commandBuffer) {
7346 this->unregister_VkCommandBuffer((VkCommandBuffer)commandBuffer);
7347 });
7348 eraseObjects(&p->subObjects);
7349}
7350
// Returns the (cached) memory properties of the physical device backing
// |device|. On first call, resolves the physical device — from the device
// info table when VK_NULL_HANDLE is passed — and queries the host once;
// subsequent calls return the cached copy.
const VkPhysicalDeviceMemoryProperties& ResourceTracker::getPhysicalDeviceMemoryProperties(
    void* context, VkDevice device, VkPhysicalDevice physicalDevice) {
    if (!mCachedPhysicalDeviceMemoryProps) {
        if (physicalDevice == VK_NULL_HANDLE) {
            AutoLock<RecursiveLock> lock(mLock);

            auto deviceInfoIt = info_VkDevice.find(device);
            if (deviceInfoIt == info_VkDevice.end()) {
                mesa_loge("Failed to pass device or physical device.");
                abort();
            }
            const auto& deviceInfo = deviceInfoIt->second;
            physicalDevice = deviceInfo.physdev;
        }

        VkEncoder* enc = (VkEncoder*)context;

        VkPhysicalDeviceMemoryProperties properties;
        enc->vkGetPhysicalDeviceMemoryProperties(physicalDevice, &properties, true /* do lock */);

        mCachedPhysicalDeviceMemoryProps.emplace(std::move(properties));
    }
    return *mCachedPhysicalDeviceMemoryProps;
}
Yahan Zhou483ac022022-06-13 15:41:11 -07007375
// Process-wide singleton instance, created lazily by ResourceTracker::get().
static ResourceTracker* sTracker = nullptr;
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07007377
ResourceTracker::ResourceTracker() {
    // Allocate the handle-mapping helpers returned by createMapping() /
    // destroyMapping(); released in the destructor.
    mCreateMapping = new CreateMapping();
    mDestroyMapping = new DestroyMapping();
}
7383
ResourceTracker::~ResourceTracker() {
    // Release the handle-mapping helpers allocated in the constructor.
    delete mCreateMapping;
    delete mDestroyMapping;
}
7388
// Returns the handle mapping used on the handle-creation path. Non-owning.
VulkanHandleMapping* ResourceTracker::createMapping() { return mCreateMapping; }
7390
// Returns the handle mapping used on the handle-destruction path. Non-owning.
VulkanHandleMapping* ResourceTracker::destroyMapping() { return mDestroyMapping; }
7392
Lingfeng Yang71b596b2018-11-07 18:03:25 -08007393// static
7394ResourceTracker* ResourceTracker::get() {
7395 if (!sTracker) {
7396 // To be initialized once on vulkan device open.
7397 sTracker = new ResourceTracker;
7398 }
7399 return sTracker;
7400}
Lingfeng Yang71b596b2018-11-07 18:03:25 -08007401
// static
// Returns the encoder for recording into |commandBuffer|. With
// QUEUE_SUBMIT_WITH_COMMANDS each command buffer gets a private staging
// stream/encoder popped from the shared staging pool on first use; otherwise
// the thread-local encoder is synced to the command buffer and shared.
ALWAYS_INLINE_GFXSTREAM VkEncoder* ResourceTracker::getCommandBufferEncoder(
    VkCommandBuffer commandBuffer) {
    if (!(ResourceTracker::streamFeatureBits &
          VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT)) {
        auto enc = ResourceTracker::getThreadLocalEncoder();
        ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, enc);
        return enc;
    }

    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (!cb->privateEncoder) {
        // Ensure the staging pool uses this tracker's allocator before
        // handing out a stream/encoder pair.
        sStaging.setAllocFree(ResourceTracker::get()->getAlloc(),
                              ResourceTracker::get()->getFree());
        sStaging.popStaging((CommandBufferStagingStream**)&cb->privateStream, &cb->privateEncoder);
    }
    uint8_t* writtenPtr;
    size_t written;
    // NOTE(review): the getWritten() output is unused here — looks like
    // leftover debugging or a call kept for side effects; confirm before
    // removing.
    ((CommandBufferStagingStream*)cb->privateStream)->getWritten(&writtenPtr, &written);
    return cb->privateEncoder;
}
7423
7424// static
Gurchetan Singhc4444b82023-09-19 08:06:20 -07007425ALWAYS_INLINE_GFXSTREAM VkEncoder* ResourceTracker::getQueueEncoder(VkQueue queue) {
Lingfeng Yang967f9af2021-01-22 17:56:24 -08007426 auto enc = ResourceTracker::getThreadLocalEncoder();
Gurchetan Singhfa4dfda2023-09-18 17:11:24 -07007427 if (!(ResourceTracker::streamFeatureBits &
7428 VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT)) {
Lingfeng Yang967f9af2021-01-22 17:56:24 -08007429 ResourceTracker::get()->syncEncodersForQueue(queue, enc);
7430 }
7431 return enc;
7432}
7433
7434// static
Gurchetan Singhc4444b82023-09-19 08:06:20 -07007435ALWAYS_INLINE_GFXSTREAM VkEncoder* ResourceTracker::getThreadLocalEncoder() {
Lingfeng Yang967f9af2021-01-22 17:56:24 -08007436 auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
7437 auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
7438 return vkEncoder;
7439}
Lingfeng Yangf0654ff2019-02-02 12:21:24 -08007440
// static
// Installs the shared sequence-number location used by nextSeqno()/getSeqno().
void ResourceTracker::setSeqnoPtr(uint32_t* seqnoptr) { sSeqnoPtr = seqnoptr; }
Lingfeng Yangdb616552021-01-22 17:58:02 -08007443
7444// static
Gurchetan Singhc4444b82023-09-19 08:06:20 -07007445ALWAYS_INLINE_GFXSTREAM uint32_t ResourceTracker::nextSeqno() {
Lingfeng Yangdb616552021-01-22 17:58:02 -08007446 uint32_t res = __atomic_add_fetch(sSeqnoPtr, 1, __ATOMIC_SEQ_CST);
7447 return res;
7448}
7449
7450// static
Gurchetan Singhc4444b82023-09-19 08:06:20 -07007451ALWAYS_INLINE_GFXSTREAM uint32_t ResourceTracker::getSeqno() {
Lingfeng Yangdb616552021-01-22 17:58:02 -08007452 uint32_t res = __atomic_load_n(sSeqnoPtr, __ATOMIC_SEQ_CST);
7453 return res;
7454}
7455
// No-op transforms: these types need no guest<->host translation.
void ResourceTracker::transformImpl_VkExternalMemoryProperties_tohost(VkExternalMemoryProperties*,
                                                                      uint32_t) {}

void ResourceTracker::transformImpl_VkImageCreateInfo_fromhost(const VkImageCreateInfo*, uint32_t) {
}
void ResourceTracker::transformImpl_VkImageCreateInfo_tohost(const VkImageCreateInfo*, uint32_t) {}
Kaiyi Lifb9dd352021-12-09 22:08:51 +00007462
// Stamps out empty tohost/fromhost transform implementations for each type in
// LIST_TRIVIAL_TRANSFORMED_TYPES — types requiring no guest<->host
// translation.
#define DEFINE_TRANSFORMED_TYPE_IMPL(type)                                \
    void ResourceTracker::transformImpl_##type##_tohost(type*, uint32_t) {} \
    void ResourceTracker::transformImpl_##type##_fromhost(type*, uint32_t) {}

LIST_TRIVIAL_TRANSFORMED_TYPES(DEFINE_TRANSFORMED_TYPE_IMPL)
Lingfeng Yang2b1b8cf2019-02-08 09:53:36 -08007468
Jason Macnak3d664002023-03-30 16:00:50 -07007469} // namespace vk
7470} // namespace gfxstream