Lingfeng Yang | a963ea0 | 2019-03-21 21:27:04 -0700 | [diff] [blame] | 1 | // Copyright (C) 2018 The Android Open Source Project |
Lingfeng Yang | 71b596b | 2018-11-07 18:03:25 -0800 | [diff] [blame] | 2 | // Copyright (C) 2018 Google Inc. |
| 3 | // |
| 4 | // Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | // you may not use this file except in compliance with the License. |
| 6 | // You may obtain a copy of the License at |
| 7 | // |
| 8 | // http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | // |
| 10 | // Unless required by applicable law or agreed to in writing, software |
| 11 | // distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | // See the License for the specific language governing permissions and |
| 14 | // limitations under the License. |
| 15 | |
Lingfeng Yang | 71b596b | 2018-11-07 18:03:25 -0800 | [diff] [blame] | 16 | #include "ResourceTracker.h" |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 17 | |
Lingfeng Yang | 3175463 | 2018-12-21 18:24:55 -0800 | [diff] [blame] | 18 | #include "../OpenglSystemCommon/EmulatorFeatureInfo.h" |
Lingfeng Yang | 34b5cae | 2019-08-21 14:12:19 -0700 | [diff] [blame] | 19 | #include "../OpenglSystemCommon/HostConnection.h" |
Shalini S | db704c9 | 2023-01-27 21:35:33 +0000 | [diff] [blame] | 20 | #include "CommandBufferStagingStream.h" |
| 21 | #include "DescriptorSetVirtualization.h" |
Lingfeng Yang | 58b89c8 | 2018-12-25 11:23:21 -0800 | [diff] [blame] | 22 | #include "HostVisibleMemoryVirtualization.h" |
Lingfeng Yang | 71b596b | 2018-11-07 18:03:25 -0800 | [diff] [blame] | 23 | #include "Resources.h" |
Lingfeng Yang | 131d5a4 | 2018-11-30 12:00:33 -0800 | [diff] [blame] | 24 | #include "VkEncoder.h" |
Joshua Duong | cda9cf1 | 2022-10-17 21:41:48 -0700 | [diff] [blame] | 25 | #include "aemu/base/AlignedBuf.h" |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 26 | #include "gfxstream_vk_private.h" |
Lingfeng Yang | 236abc9 | 2018-12-21 20:19:33 -0800 | [diff] [blame] | 27 | #include "goldfish_address_space.h" |
Lingfeng Yang | def88ba | 2018-12-13 12:43:17 -0800 | [diff] [blame] | 28 | #include "goldfish_vk_private_defs.h" |
Gurchetan Singh | feb8fb1 | 2023-05-08 16:36:19 -0700 | [diff] [blame] | 29 | #include "util.h" |
| 30 | #include "virtgpu_gfxstream_protocol.h" |
Sergiu | ad91847 | 2024-05-21 16:28:45 +0100 | [diff] [blame] | 31 | #include "vulkan/vk_enum_string_helper.h" |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 32 | #include "vulkan/vulkan_core.h" |
Kaiyi Li | 6a76b33 | 2022-08-23 08:10:59 -0700 | [diff] [blame] | 33 | #ifdef VK_USE_PLATFORM_ANDROID_KHR |
Lingfeng Yang | 5c70112 | 2019-03-05 08:34:46 -0800 | [diff] [blame] | 34 | #include "vk_format_info.h" |
Kaiyi Li | 6a76b33 | 2022-08-23 08:10:59 -0700 | [diff] [blame] | 35 | #endif |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 36 | #include <stdlib.h> |
| 37 | #include <vndk/hardware_buffer.h> |
Lingfeng Yang | def88ba | 2018-12-13 12:43:17 -0800 | [diff] [blame] | 38 | |
#include <algorithm>
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
Lingfeng Yang | 6ab1b0d | 2018-11-27 23:36:03 -0800 | [diff] [blame] | 44 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 45 | #include "vk_struct_id.h" |
| 46 | #include "vk_util.h" |
Lingfeng Yang | 131d5a4 | 2018-11-30 12:00:33 -0800 | [diff] [blame] | 47 | |
Yahan Zhou | f72403b | 2022-02-08 18:50:35 -0800 | [diff] [blame] | 48 | #if defined(__ANDROID__) || defined(__linux__) || defined(__APPLE__) |
Lingfeng Yang | f83538d | 2019-03-07 15:07:36 -0800 | [diff] [blame] | 49 | |
Yahan Zhou | a499e44 | 2019-02-26 16:35:01 -0800 | [diff] [blame] | 50 | #include <sys/mman.h> |
| 51 | #include <sys/syscall.h> |
| 52 | |
Yahan Zhou | a499e44 | 2019-02-26 16:35:01 -0800 | [diff] [blame] | 53 | |
// memfd_create(2) shim: bionic (Android libc) exposes no memfd_create
// wrapper, so invoke the raw syscall. Elsewhere this stub reports failure
// (-1) and callers must cope with that.
// NOTE(review): on desktop __linux__ this also returns -1 even though the
// kernel supports memfd_create — confirm that is intended.
static inline int inline_memfd_create(const char* name, unsigned int flags) {
#if defined(__ANDROID__)
    return syscall(SYS_memfd_create, name, flags);
#else
    return -1;
#endif
}
Gurchetan Singh | a81c85e | 2022-02-07 18:47:12 -0800 | [diff] [blame] | 61 | |
Elliott Hughes | 4564a05 | 2019-08-05 18:55:53 -0700 | [diff] [blame] | 62 | #define memfd_create inline_memfd_create |
Gurchetan Singh | a81c85e | 2022-02-07 18:47:12 -0800 | [diff] [blame] | 63 | #endif |
Yahan Zhou | a499e44 | 2019-02-26 16:35:01 -0800 | [diff] [blame] | 64 | |
#ifndef VK_USE_PLATFORM_FUCHSIA
// No-op stand-ins for Zircon syscalls so non-Fuchsia builds can share code
// paths that close/create zx handles without an #ifdef at every call site.
void zx_handle_close(zx_handle_t) {}
void zx_event_create(int, zx_handle_t*) {}
#endif
| 69 | |
// Default Vulkan API version (1.1.0) used by this tracker.
// NOTE(review): the consuming code is not in this chunk — presumably the
// fallback when the application requests no apiVersion; confirm.
static constexpr uint32_t kDefaultApiVersion = VK_MAKE_VERSION(1, 1, 0);
Lingfeng Yang | 6ab1b0d | 2018-11-27 23:36:03 -0800 | [diff] [blame] | 71 | |
Jason Macnak | 3d66400 | 2023-03-30 16:00:50 -0700 | [diff] [blame] | 72 | namespace gfxstream { |
| 73 | namespace vk { |
Lingfeng Yang | 71b596b | 2018-11-07 18:03:25 -0800 | [diff] [blame] | 74 | |
// Generates the three VulkanHandleMapping overrides for one handle type:
//  - mapHandles_<T>:       in-place transform of an array of handles
//  - mapHandles_<T>_u64:   handle -> uint64_t encoding
//  - mapHandles_u64_<T>:   uint64_t -> handle decoding
// Each *_impl argument is a statement run per element, with `handles`,
// `handle_u64s`, and the loop index `i` in scope. (Comments must stay outside
// the macro body: a // inside would swallow the line-continuation backslash.)
#define MAKE_HANDLE_MAPPING_FOREACH(type_name, map_impl, map_to_u64_impl, map_from_u64_impl) \
    void mapHandles_##type_name(type_name* handles, size_t count) override {                 \
        for (size_t i = 0; i < count; ++i) {                                                 \
            map_impl;                                                                        \
        }                                                                                    \
    }                                                                                        \
    void mapHandles_##type_name##_u64(const type_name* handles, uint64_t* handle_u64s,       \
                                      size_t count) override {                               \
        for (size_t i = 0; i < count; ++i) {                                                 \
            map_to_u64_impl;                                                                 \
        }                                                                                    \
    }                                                                                        \
    void mapHandles_u64_##type_name(const uint64_t* handle_u64s, type_name* handles,         \
                                    size_t count) override {                                 \
        for (size_t i = 0; i < count; ++i) {                                                 \
            map_from_u64_impl;                                                               \
        }                                                                                    \
    }
Lingfeng Yang | 2285df1 | 2018-11-17 16:25:11 -0800 | [diff] [blame] | 93 | |
| 94 | #define DEFINE_RESOURCE_TRACKING_CLASS(class_name, impl) \ |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 95 | class class_name : public VulkanHandleMapping { \ |
| 96 | public: \ |
| 97 | virtual ~class_name() {} \ |
| 98 | GOLDFISH_VK_LIST_HANDLE_TYPES(impl) \ |
| 99 | }; |
Lingfeng Yang | 2285df1 | 2018-11-17 16:25:11 -0800 | [diff] [blame] | 100 | |
// "Create" mapping impl: wrap each host handle in a freshly allocated
// goldfish object and register it with the ResourceTracker. Expanded into
// the CreateMapping class via DEFINE_RESOURCE_TRACKING_CLASS below.
#define CREATE_MAPPING_IMPL_FOR_TYPE(type_name)                                    \
    MAKE_HANDLE_MAPPING_FOREACH(                                                   \
        type_name, handles[i] = new_from_host_##type_name(handles[i]);             \
        ResourceTracker::get()->register_##type_name(handles[i]);                  \
        , handle_u64s[i] = (uint64_t)new_from_host_##type_name(handles[i]),        \
        handles[i] = (type_name)new_from_host_u64_##type_name(handle_u64s[i]);     \
        ResourceTracker::get()->register_##type_name(handles[i]);)
Lingfeng Yang | 2285df1 | 2018-11-17 16:25:11 -0800 | [diff] [blame] | 108 | |
// "Unwrap" mapping impl: replace each goldfish wrapper handle with the
// underlying host handle; no registration state is touched.
#define UNWRAP_MAPPING_IMPL_FOR_TYPE(type_name)                              \
    MAKE_HANDLE_MAPPING_FOREACH(                                             \
        type_name, handles[i] = get_host_##type_name(handles[i]),            \
        handle_u64s[i] = (uint64_t)get_host_u64_##type_name(handles[i]),     \
        handles[i] = (type_name)get_host_##type_name((type_name)handle_u64s[i]))
Lingfeng Yang | 2285df1 | 2018-11-17 16:25:11 -0800 | [diff] [blame] | 114 | |
// "Destroy" mapping impl: the in-place variant unregisters from the
// ResourceTracker and deletes the goldfish wrapper; the two u64 variants
// delete the wrapper without writing their output arrays.
#define DESTROY_MAPPING_IMPL_FOR_TYPE(type_name)                                               \
    MAKE_HANDLE_MAPPING_FOREACH(type_name,                                                     \
                                ResourceTracker::get()->unregister_##type_name(handles[i]);    \
                                delete_goldfish_##type_name(handles[i]), (void)handle_u64s[i]; \
                                delete_goldfish_##type_name(handles[i]), (void)handles[i];     \
                                delete_goldfish_##type_name((type_name)handle_u64s[i]))
Lingfeng Yang | 2285df1 | 2018-11-17 16:25:11 -0800 | [diff] [blame] | 121 | |
| 122 | DEFINE_RESOURCE_TRACKING_CLASS(CreateMapping, CREATE_MAPPING_IMPL_FOR_TYPE) |
Lingfeng Yang | 2285df1 | 2018-11-17 16:25:11 -0800 | [diff] [blame] | 123 | DEFINE_RESOURCE_TRACKING_CLASS(DestroyMapping, DESTROY_MAPPING_IMPL_FOR_TYPE) |
Lingfeng Yang | 71b596b | 2018-11-07 18:03:25 -0800 | [diff] [blame] | 124 | |
// Sequence-number pointer; nullptr until assigned.
// NOTE(review): the assignment site is outside this chunk — presumably it
// points at the stream/transport sequence counter; confirm against the
// setup code before relying on this description.
static uint32_t* sSeqnoPtr = nullptr;

// static member definitions for ResourceTracker
uint32_t ResourceTracker::streamFeatureBits = 0;
ResourceTracker::ThreadingCallbacks ResourceTracker::threadingCallbacks;
| 130 | |
Lingfeng Yang | 967f9af | 2021-01-22 17:56:24 -0800 | [diff] [blame] | 131 | struct StagingInfo { |
| 132 | Lock mLock; |
| 133 | std::vector<CommandBufferStagingStream*> streams; |
| 134 | std::vector<VkEncoder*> encoders; |
Shalini S | db704c9 | 2023-01-27 21:35:33 +0000 | [diff] [blame] | 135 | /// \brief sets alloc and free callbacks for memory allocation for CommandBufferStagingStream(s) |
| 136 | /// \param allocFn is the callback to allocate memory |
| 137 | /// \param freeFn is the callback to free memory |
| 138 | void setAllocFree(CommandBufferStagingStream::Alloc&& allocFn, |
| 139 | CommandBufferStagingStream::Free&& freeFn) { |
| 140 | mAlloc = allocFn; |
| 141 | mFree = freeFn; |
| 142 | } |
Lingfeng Yang | 967f9af | 2021-01-22 17:56:24 -0800 | [diff] [blame] | 143 | |
Yilong Li | 27957ca | 2021-01-27 10:53:04 -0800 | [diff] [blame] | 144 | ~StagingInfo() { |
| 145 | for (auto stream : streams) { |
| 146 | delete stream; |
| 147 | } |
| 148 | |
| 149 | for (auto encoder : encoders) { |
| 150 | delete encoder; |
| 151 | } |
| 152 | } |
| 153 | |
Lingfeng Yang | 967f9af | 2021-01-22 17:56:24 -0800 | [diff] [blame] | 154 | void pushStaging(CommandBufferStagingStream* stream, VkEncoder* encoder) { |
Gurchetan Singh | 6c906de | 2021-10-21 17:09:00 -0700 | [diff] [blame] | 155 | AutoLock<Lock> lock(mLock); |
Lingfeng Yang | 967f9af | 2021-01-22 17:56:24 -0800 | [diff] [blame] | 156 | stream->reset(); |
| 157 | streams.push_back(stream); |
| 158 | encoders.push_back(encoder); |
| 159 | } |
| 160 | |
| 161 | void popStaging(CommandBufferStagingStream** streamOut, VkEncoder** encoderOut) { |
Gurchetan Singh | 6c906de | 2021-10-21 17:09:00 -0700 | [diff] [blame] | 162 | AutoLock<Lock> lock(mLock); |
Lingfeng Yang | 967f9af | 2021-01-22 17:56:24 -0800 | [diff] [blame] | 163 | CommandBufferStagingStream* stream; |
| 164 | VkEncoder* encoder; |
| 165 | if (streams.empty()) { |
Shalini S | db704c9 | 2023-01-27 21:35:33 +0000 | [diff] [blame] | 166 | if (mAlloc && mFree) { |
| 167 | // if custom allocators are provided, forward them to CommandBufferStagingStream |
| 168 | stream = new CommandBufferStagingStream(mAlloc, mFree); |
| 169 | } else { |
| 170 | stream = new CommandBufferStagingStream; |
| 171 | } |
Lingfeng Yang | 967f9af | 2021-01-22 17:56:24 -0800 | [diff] [blame] | 172 | encoder = new VkEncoder(stream); |
| 173 | } else { |
| 174 | stream = streams.back(); |
| 175 | encoder = encoders.back(); |
| 176 | streams.pop_back(); |
| 177 | encoders.pop_back(); |
| 178 | } |
| 179 | *streamOut = stream; |
| 180 | *encoderOut = encoder; |
| 181 | } |
Shalini S | db704c9 | 2023-01-27 21:35:33 +0000 | [diff] [blame] | 182 | |
| 183 | private: |
| 184 | CommandBufferStagingStream::Alloc mAlloc = nullptr; |
| 185 | CommandBufferStagingStream::Free mFree = nullptr; |
Lingfeng Yang | 967f9af | 2021-01-22 17:56:24 -0800 | [diff] [blame] | 186 | }; |
| 187 | |
| 188 | static StagingInfo sStaging; |
| 189 | |
// Descriptor sets associated with a command buffer.
// NOTE(review): the consuming code is not in this chunk — presumably sets
// whose pending writes must be flushed when the command buffer is submitted;
// confirm against the queue-submit path.
struct CommandBufferPendingDescriptorSets {
    std::unordered_set<VkDescriptorSet> sets;
};
| 193 | |
// Defines ResourceTracker::register_<type>(): under the tracker lock,
// create a default-constructed <type>_Info entry keyed by the handle.
#define HANDLE_REGISTER_IMPL_IMPL(type)               \
    void ResourceTracker::register_##type(type obj) { \
        AutoLock<RecursiveLock> lock(mLock);          \
        info_##type[obj] = type##_Info();             \
    }
| 199 | |
// Defines ResourceTracker::unregister_<type>(): under the tracker lock,
// drop the handle's tracking-info entry.
#define HANDLE_UNREGISTER_IMPL_IMPL(type)               \
    void ResourceTracker::unregister_##type(type obj) { \
        AutoLock<RecursiveLock> lock(mLock);            \
        info_##type.erase(obj);                         \
    }
| 205 | |
// Instantiate register_<type>() for every handle type, but unregister_<type>()
// only for the "trivial" subset — the remaining handle types presumably get
// hand-written unregister implementations elsewhere in this file (not
// visible in this chunk).
GOLDFISH_VK_LIST_HANDLE_TYPES(HANDLE_REGISTER_IMPL_IMPL)
GOLDFISH_VK_LIST_TRIVIAL_HANDLE_TYPES(HANDLE_UNREGISTER_IMPL_IMPL)
| 208 | uint32_t getWaitSemaphoreCount(const VkSubmitInfo& pSubmit) { return pSubmit.waitSemaphoreCount; } |
Lingfeng Yang | 967f9af | 2021-01-22 17:56:24 -0800 | [diff] [blame] | 209 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 210 | uint32_t getWaitSemaphoreCount(const VkSubmitInfo2& pSubmit) { |
| 211 | return pSubmit.waitSemaphoreInfoCount; |
| 212 | } |
Lingfeng Yang | 967f9af | 2021-01-22 17:56:24 -0800 | [diff] [blame] | 213 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 214 | uint32_t getCommandBufferCount(const VkSubmitInfo& pSubmit) { return pSubmit.commandBufferCount; } |
Lingfeng Yang | 967f9af | 2021-01-22 17:56:24 -0800 | [diff] [blame] | 215 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 216 | uint32_t getCommandBufferCount(const VkSubmitInfo2& pSubmit) { |
| 217 | return pSubmit.commandBufferInfoCount; |
| 218 | } |
Lingfeng Yang | ffb94af | 2021-04-22 15:16:35 -0700 | [diff] [blame] | 219 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 220 | uint32_t getSignalSemaphoreCount(const VkSubmitInfo& pSubmit) { |
| 221 | return pSubmit.signalSemaphoreCount; |
| 222 | } |
Lingfeng Yang | ffb94af | 2021-04-22 15:16:35 -0700 | [diff] [blame] | 223 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 224 | uint32_t getSignalSemaphoreCount(const VkSubmitInfo2& pSubmit) { |
| 225 | return pSubmit.signalSemaphoreInfoCount; |
| 226 | } |
Lingfeng Yang | 967f9af | 2021-01-22 17:56:24 -0800 | [diff] [blame] | 227 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 228 | VkSemaphore getWaitSemaphore(const VkSubmitInfo& pSubmit, int i) { |
| 229 | return pSubmit.pWaitSemaphores[i]; |
| 230 | } |
Lingfeng Yang | 39a276e | 2019-06-17 13:27:22 -0700 | [diff] [blame] | 231 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 232 | VkSemaphore getWaitSemaphore(const VkSubmitInfo2& pSubmit, int i) { |
| 233 | return pSubmit.pWaitSemaphoreInfos[i].semaphore; |
| 234 | } |
Lingfeng Yang | f8cdd8b | 2021-02-08 11:32:45 -0800 | [diff] [blame] | 235 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 236 | VkSemaphore getSignalSemaphore(const VkSubmitInfo& pSubmit, int i) { |
| 237 | return pSubmit.pSignalSemaphores[i]; |
| 238 | } |
Lingfeng Yang | 39a276e | 2019-06-17 13:27:22 -0700 | [diff] [blame] | 239 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 240 | VkSemaphore getSignalSemaphore(const VkSubmitInfo2& pSubmit, int i) { |
| 241 | return pSubmit.pSignalSemaphoreInfos[i].semaphore; |
| 242 | } |
Lingfeng Yang | f89f75d | 2020-07-09 17:42:33 -0700 | [diff] [blame] | 243 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 244 | VkCommandBuffer getCommandBuffer(const VkSubmitInfo& pSubmit, int i) { |
| 245 | return pSubmit.pCommandBuffers[i]; |
| 246 | } |
Lingfeng Yang | f89f75d | 2020-07-09 17:42:33 -0700 | [diff] [blame] | 247 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 248 | VkCommandBuffer getCommandBuffer(const VkSubmitInfo2& pSubmit, int i) { |
| 249 | return pSubmit.pCommandBufferInfos[i].commandBuffer; |
| 250 | } |
Lingfeng Yang | 236abc9 | 2018-12-21 20:19:33 -0800 | [diff] [blame] | 251 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 252 | bool descriptorPoolSupportsIndividualFreeLocked(VkDescriptorPool pool) { |
| 253 | return as_goldfish_VkDescriptorPool(pool)->allocInfo->createFlags & |
| 254 | VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; |
| 255 | } |
Lingfeng Yang | def88ba | 2018-12-13 12:43:17 -0800 | [diff] [blame] | 256 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 257 | VkDescriptorImageInfo createImmutableSamplersFilteredImageInfo( |
| 258 | VkDescriptorType descType, VkDescriptorSet descSet, uint32_t binding, |
| 259 | const VkDescriptorImageInfo* pImageInfo) { |
| 260 | VkDescriptorImageInfo res = *pImageInfo; |
Lingfeng Yang | 236abc9 | 2018-12-21 20:19:33 -0800 | [diff] [blame] | 261 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 262 | if (descType != VK_DESCRIPTOR_TYPE_SAMPLER && |
| 263 | descType != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) |
Lingfeng Yang | a4ae052 | 2021-02-17 14:12:19 -0800 | [diff] [blame] | 264 | return res; |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 265 | |
| 266 | bool immutableSampler = |
| 267 | as_goldfish_VkDescriptorSet(descSet)->reified->bindingIsImmutableSampler[binding]; |
| 268 | |
| 269 | if (!immutableSampler) return res; |
| 270 | |
| 271 | res.sampler = 0; |
| 272 | |
| 273 | return res; |
| 274 | } |
| 275 | |
| 276 | bool descriptorBindingIsImmutableSampler(VkDescriptorSet dstSet, uint32_t dstBinding) { |
| 277 | return as_goldfish_VkDescriptorSet(dstSet)->reified->bindingIsImmutableSampler[dstBinding]; |
| 278 | } |
| 279 | |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 280 | VkDescriptorImageInfo ResourceTracker::filterNonexistentSampler( |
| 281 | const VkDescriptorImageInfo& inputInfo) { |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 282 | VkSampler sampler = inputInfo.sampler; |
| 283 | |
| 284 | VkDescriptorImageInfo res = inputInfo; |
| 285 | |
| 286 | if (sampler) { |
| 287 | auto it = info_VkSampler.find(sampler); |
| 288 | bool samplerExists = it != info_VkSampler.end(); |
| 289 | if (!samplerExists) res.sampler = 0; |
Lingfeng Yang | e9e77d5 | 2020-03-25 14:01:58 -0700 | [diff] [blame] | 290 | } |
| 291 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 292 | return res; |
| 293 | } |
Lingfeng Yang | 03354c7 | 2020-03-26 13:00:51 -0700 | [diff] [blame] | 294 | |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 295 | void ResourceTracker::emitDeviceMemoryReport(VkDevice_Info info, |
| 296 | VkDeviceMemoryReportEventTypeEXT type, |
| 297 | uint64_t memoryObjectId, VkDeviceSize size, |
| 298 | VkObjectType objectType, uint64_t objectHandle, |
| 299 | uint32_t heapIndex) { |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 300 | if (info.deviceMemoryReportCallbacks.empty()) return; |
| 301 | |
| 302 | const VkDeviceMemoryReportCallbackDataEXT callbackData = { |
| 303 | VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT, // sType |
| 304 | nullptr, // pNext |
| 305 | 0, // flags |
| 306 | type, // type |
| 307 | memoryObjectId, // memoryObjectId |
| 308 | size, // size |
| 309 | objectType, // objectType |
| 310 | objectHandle, // objectHandle |
| 311 | heapIndex, // heapIndex |
| 312 | }; |
| 313 | for (const auto& callback : info.deviceMemoryReportCallbacks) { |
| 314 | callback.first(&callbackData, callback.second); |
Lingfeng Yang | ffb94af | 2021-04-22 15:16:35 -0700 | [diff] [blame] | 315 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 316 | } |
David Reveman | e10aae2 | 2019-03-17 15:55:45 -0400 | [diff] [blame] | 317 | |
| 318 | #ifdef VK_USE_PLATFORM_FUCHSIA |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 319 | inline fuchsia_sysmem::wire::BufferCollectionConstraints defaultBufferCollectionConstraints( |
| 320 | size_t minSizeBytes, size_t minBufferCount, size_t maxBufferCount = 0u, |
| 321 | size_t minBufferCountForCamping = 0u, size_t minBufferCountForDedicatedSlack = 0u, |
| 322 | size_t minBufferCountForSharedSlack = 0u) { |
| 323 | fuchsia_sysmem::wire::BufferCollectionConstraints constraints = {}; |
| 324 | constraints.min_buffer_count = minBufferCount; |
| 325 | if (maxBufferCount > 0) { |
| 326 | constraints.max_buffer_count = maxBufferCount; |
Lingfeng Yang | 236abc9 | 2018-12-21 20:19:33 -0800 | [diff] [blame] | 327 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 328 | if (minBufferCountForCamping) { |
| 329 | constraints.min_buffer_count_for_camping = minBufferCountForCamping; |
Lingfeng Yang | 34b5cae | 2019-08-21 14:12:19 -0700 | [diff] [blame] | 330 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 331 | if (minBufferCountForSharedSlack) { |
| 332 | constraints.min_buffer_count_for_shared_slack = minBufferCountForSharedSlack; |
Lingfeng Yang | b8a38c7 | 2019-02-02 20:27:54 -0800 | [diff] [blame] | 333 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 334 | constraints.has_buffer_memory_constraints = true; |
| 335 | fuchsia_sysmem::wire::BufferMemoryConstraints& buffer_constraints = |
| 336 | constraints.buffer_memory_constraints; |
Lingfeng Yang | b8a38c7 | 2019-02-02 20:27:54 -0800 | [diff] [blame] | 337 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 338 | buffer_constraints.min_size_bytes = minSizeBytes; |
| 339 | buffer_constraints.max_size_bytes = 0xffffffff; |
| 340 | buffer_constraints.physically_contiguous_required = false; |
| 341 | buffer_constraints.secure_required = false; |
Lingfeng Yang | 3175463 | 2018-12-21 18:24:55 -0800 | [diff] [blame] | 342 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 343 | // No restrictions on coherency domain or Heaps. |
| 344 | buffer_constraints.ram_domain_supported = true; |
| 345 | buffer_constraints.cpu_domain_supported = true; |
| 346 | buffer_constraints.inaccessible_domain_supported = true; |
| 347 | buffer_constraints.heap_permitted_count = 2; |
| 348 | buffer_constraints.heap_permitted[0] = fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal; |
| 349 | buffer_constraints.heap_permitted[1] = fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible; |
Lingfeng Yang | b55ed1c | 2019-06-20 15:57:08 -0700 | [diff] [blame] | 350 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 351 | return constraints; |
| 352 | } |
Lingfeng Yang | c53e747 | 2019-03-27 08:50:55 -0700 | [diff] [blame] | 353 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 354 | uint32_t getBufferCollectionConstraintsVulkanImageUsage(const VkImageCreateInfo* pImageInfo) { |
| 355 | uint32_t usage = 0u; |
| 356 | VkImageUsageFlags imageUsage = pImageInfo->usage; |
Lingfeng Yang | f89f75d | 2020-07-09 17:42:33 -0700 | [diff] [blame] | 357 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 358 | #define SetUsageBit(BIT, VALUE) \ |
| 359 | if (imageUsage & VK_IMAGE_USAGE_##BIT##_BIT) { \ |
Ian McKellar | 34ff1e1 | 2021-04-15 01:26:47 +0000 | [diff] [blame] | 360 | usage |= fuchsia_sysmem::wire::kVulkanImageUsage##VALUE; \ |
Yilong Li | 2f31550 | 2020-07-10 17:51:28 -0700 | [diff] [blame] | 361 | } |
| 362 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 363 | SetUsageBit(COLOR_ATTACHMENT, ColorAttachment); |
| 364 | SetUsageBit(TRANSFER_SRC, TransferSrc); |
| 365 | SetUsageBit(TRANSFER_DST, TransferDst); |
| 366 | SetUsageBit(SAMPLED, Sampled); |
Yilong Li | 2f31550 | 2020-07-10 17:51:28 -0700 | [diff] [blame] | 367 | |
| 368 | #undef SetUsageBit |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 369 | return usage; |
| 370 | } |
Yilong Li | 2f31550 | 2020-07-10 17:51:28 -0700 | [diff] [blame] | 371 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 372 | uint32_t getBufferCollectionConstraintsVulkanBufferUsage(VkBufferUsageFlags bufferUsage) { |
| 373 | uint32_t usage = 0u; |
Yilong Li | 2f31550 | 2020-07-10 17:51:28 -0700 | [diff] [blame] | 374 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 375 | #define SetUsageBit(BIT, VALUE) \ |
| 376 | if (bufferUsage & VK_BUFFER_USAGE_##BIT##_BIT) { \ |
Ian McKellar | 34ff1e1 | 2021-04-15 01:26:47 +0000 | [diff] [blame] | 377 | usage |= fuchsia_sysmem::wire::kVulkanBufferUsage##VALUE; \ |
Yilong Li | 2f31550 | 2020-07-10 17:51:28 -0700 | [diff] [blame] | 378 | } |
| 379 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 380 | SetUsageBit(TRANSFER_SRC, TransferSrc); |
| 381 | SetUsageBit(TRANSFER_DST, TransferDst); |
| 382 | SetUsageBit(UNIFORM_TEXEL_BUFFER, UniformTexelBuffer); |
| 383 | SetUsageBit(STORAGE_TEXEL_BUFFER, StorageTexelBuffer); |
| 384 | SetUsageBit(UNIFORM_BUFFER, UniformBuffer); |
| 385 | SetUsageBit(STORAGE_BUFFER, StorageBuffer); |
| 386 | SetUsageBit(INDEX_BUFFER, IndexBuffer); |
| 387 | SetUsageBit(VERTEX_BUFFER, VertexBuffer); |
| 388 | SetUsageBit(INDIRECT_BUFFER, IndirectBuffer); |
Yilong Li | 2f31550 | 2020-07-10 17:51:28 -0700 | [diff] [blame] | 389 | |
| 390 | #undef SetUsageBit |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 391 | return usage; |
| 392 | } |
| 393 | |
| 394 | uint32_t getBufferCollectionConstraintsVulkanBufferUsage( |
| 395 | const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) { |
| 396 | VkBufferUsageFlags bufferUsage = pBufferConstraintsInfo->createInfo.usage; |
| 397 | return getBufferCollectionConstraintsVulkanBufferUsage(bufferUsage); |
| 398 | } |
| 399 | |
| 400 | static fuchsia_sysmem::wire::PixelFormatType vkFormatTypeToSysmem(VkFormat format) { |
| 401 | switch (format) { |
| 402 | case VK_FORMAT_B8G8R8A8_SINT: |
| 403 | case VK_FORMAT_B8G8R8A8_UNORM: |
| 404 | case VK_FORMAT_B8G8R8A8_SRGB: |
| 405 | case VK_FORMAT_B8G8R8A8_SNORM: |
| 406 | case VK_FORMAT_B8G8R8A8_SSCALED: |
| 407 | case VK_FORMAT_B8G8R8A8_USCALED: |
| 408 | return fuchsia_sysmem::wire::PixelFormatType::kBgra32; |
| 409 | case VK_FORMAT_R8G8B8A8_SINT: |
| 410 | case VK_FORMAT_R8G8B8A8_UNORM: |
| 411 | case VK_FORMAT_R8G8B8A8_SRGB: |
| 412 | case VK_FORMAT_R8G8B8A8_SNORM: |
| 413 | case VK_FORMAT_R8G8B8A8_SSCALED: |
| 414 | case VK_FORMAT_R8G8B8A8_USCALED: |
| 415 | return fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8; |
| 416 | case VK_FORMAT_R8_UNORM: |
| 417 | case VK_FORMAT_R8_UINT: |
| 418 | case VK_FORMAT_R8_USCALED: |
| 419 | case VK_FORMAT_R8_SNORM: |
| 420 | case VK_FORMAT_R8_SINT: |
| 421 | case VK_FORMAT_R8_SSCALED: |
| 422 | case VK_FORMAT_R8_SRGB: |
| 423 | return fuchsia_sysmem::wire::PixelFormatType::kR8; |
| 424 | case VK_FORMAT_R8G8_UNORM: |
| 425 | case VK_FORMAT_R8G8_UINT: |
| 426 | case VK_FORMAT_R8G8_USCALED: |
| 427 | case VK_FORMAT_R8G8_SNORM: |
| 428 | case VK_FORMAT_R8G8_SINT: |
| 429 | case VK_FORMAT_R8G8_SSCALED: |
| 430 | case VK_FORMAT_R8G8_SRGB: |
| 431 | return fuchsia_sysmem::wire::PixelFormatType::kR8G8; |
| 432 | default: |
| 433 | return fuchsia_sysmem::wire::PixelFormatType::kInvalid; |
| 434 | } |
| 435 | } |
| 436 | |
| 437 | static bool vkFormatMatchesSysmemFormat(VkFormat vkFormat, |
| 438 | fuchsia_sysmem::wire::PixelFormatType sysmemFormat) { |
| 439 | switch (vkFormat) { |
| 440 | case VK_FORMAT_B8G8R8A8_SINT: |
| 441 | case VK_FORMAT_B8G8R8A8_UNORM: |
| 442 | case VK_FORMAT_B8G8R8A8_SRGB: |
| 443 | case VK_FORMAT_B8G8R8A8_SNORM: |
| 444 | case VK_FORMAT_B8G8R8A8_SSCALED: |
| 445 | case VK_FORMAT_B8G8R8A8_USCALED: |
| 446 | return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kBgra32; |
| 447 | case VK_FORMAT_R8G8B8A8_SINT: |
| 448 | case VK_FORMAT_R8G8B8A8_UNORM: |
| 449 | case VK_FORMAT_R8G8B8A8_SRGB: |
| 450 | case VK_FORMAT_R8G8B8A8_SNORM: |
| 451 | case VK_FORMAT_R8G8B8A8_SSCALED: |
| 452 | case VK_FORMAT_R8G8B8A8_USCALED: |
| 453 | return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8; |
| 454 | case VK_FORMAT_R8_UNORM: |
| 455 | case VK_FORMAT_R8_UINT: |
| 456 | case VK_FORMAT_R8_USCALED: |
| 457 | case VK_FORMAT_R8_SNORM: |
| 458 | case VK_FORMAT_R8_SINT: |
| 459 | case VK_FORMAT_R8_SSCALED: |
| 460 | case VK_FORMAT_R8_SRGB: |
| 461 | return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kR8 || |
| 462 | sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kL8; |
| 463 | case VK_FORMAT_R8G8_UNORM: |
| 464 | case VK_FORMAT_R8G8_UINT: |
| 465 | case VK_FORMAT_R8G8_USCALED: |
| 466 | case VK_FORMAT_R8G8_SNORM: |
| 467 | case VK_FORMAT_R8G8_SINT: |
| 468 | case VK_FORMAT_R8G8_SSCALED: |
| 469 | case VK_FORMAT_R8G8_SRGB: |
| 470 | return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kR8G8; |
| 471 | default: |
| 472 | return false; |
| 473 | } |
| 474 | } |
| 475 | |
| 476 | static VkFormat sysmemPixelFormatTypeToVk(fuchsia_sysmem::wire::PixelFormatType format) { |
| 477 | switch (format) { |
| 478 | case fuchsia_sysmem::wire::PixelFormatType::kBgra32: |
| 479 | return VK_FORMAT_B8G8R8A8_SRGB; |
| 480 | case fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8: |
| 481 | return VK_FORMAT_R8G8B8A8_SRGB; |
| 482 | case fuchsia_sysmem::wire::PixelFormatType::kL8: |
| 483 | case fuchsia_sysmem::wire::PixelFormatType::kR8: |
| 484 | return VK_FORMAT_R8_UNORM; |
| 485 | case fuchsia_sysmem::wire::PixelFormatType::kR8G8: |
| 486 | return VK_FORMAT_R8G8_UNORM; |
| 487 | default: |
| 488 | return VK_FORMAT_UNDEFINED; |
| 489 | } |
| 490 | } |
| 491 | |
Mitchell Kember | d9f3e4a | 2024-01-29 16:53:33 -0800 | [diff] [blame] | 492 | // TODO(fxbug.dev/42172354): This is currently only used for allocating |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 493 | // memory for dedicated external images. It should be migrated to use |
| 494 | // SetBufferCollectionImageConstraintsFUCHSIA. |
C Stout | 5a3a422 | 2023-11-14 16:31:56 -0800 | [diff] [blame] | 495 | VkResult ResourceTracker::setBufferCollectionConstraintsFUCHSIA( |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 496 | VkEncoder* enc, VkDevice device, |
| 497 | fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* collection, |
| 498 | const VkImageCreateInfo* pImageInfo) { |
| 499 | if (pImageInfo == nullptr) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 500 | mesa_loge("setBufferCollectionConstraints: pImageInfo cannot be null."); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 501 | return VK_ERROR_OUT_OF_DEVICE_MEMORY; |
Yilong Li | 2f31550 | 2020-07-10 17:51:28 -0700 | [diff] [blame] | 502 | } |
| 503 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 504 | const VkSysmemColorSpaceFUCHSIA kDefaultColorSpace = { |
| 505 | .sType = VK_STRUCTURE_TYPE_SYSMEM_COLOR_SPACE_FUCHSIA, |
| 506 | .pNext = nullptr, |
| 507 | .colorSpace = static_cast<uint32_t>(fuchsia_sysmem::wire::ColorSpaceType::kSrgb), |
| 508 | }; |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 509 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 510 | std::vector<VkImageFormatConstraintsInfoFUCHSIA> formatInfos; |
| 511 | if (pImageInfo->format == VK_FORMAT_UNDEFINED) { |
| 512 | const auto kFormats = { |
| 513 | VK_FORMAT_B8G8R8A8_SRGB, |
| 514 | VK_FORMAT_R8G8B8A8_SRGB, |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 515 | }; |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 516 | for (auto format : kFormats) { |
| 517 | // shallow copy, using pNext from pImageInfo directly. |
| 518 | auto createInfo = *pImageInfo; |
| 519 | createInfo.format = format; |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 520 | formatInfos.push_back(VkImageFormatConstraintsInfoFUCHSIA{ |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 521 | .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA, |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 522 | .pNext = nullptr, |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 523 | .imageCreateInfo = createInfo, |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 524 | .colorSpaceCount = 1, |
| 525 | .pColorSpaces = &kDefaultColorSpace, |
| 526 | }); |
| 527 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 528 | } else { |
| 529 | formatInfos.push_back(VkImageFormatConstraintsInfoFUCHSIA{ |
| 530 | .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA, |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 531 | .pNext = nullptr, |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 532 | .imageCreateInfo = *pImageInfo, |
| 533 | .colorSpaceCount = 1, |
| 534 | .pColorSpaces = &kDefaultColorSpace, |
| 535 | }); |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 536 | } |
| 537 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 538 | VkImageConstraintsInfoFUCHSIA imageConstraints = { |
| 539 | .sType = VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA, |
| 540 | .pNext = nullptr, |
| 541 | .formatConstraintsCount = static_cast<uint32_t>(formatInfos.size()), |
| 542 | .pFormatConstraints = formatInfos.data(), |
| 543 | .bufferCollectionConstraints = |
| 544 | VkBufferCollectionConstraintsInfoFUCHSIA{ |
| 545 | .sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CONSTRAINTS_INFO_FUCHSIA, |
| 546 | .pNext = nullptr, |
| 547 | .minBufferCount = 1, |
| 548 | .maxBufferCount = 0, |
| 549 | .minBufferCountForCamping = 0, |
| 550 | .minBufferCountForDedicatedSlack = 0, |
| 551 | .minBufferCountForSharedSlack = 0, |
| 552 | }, |
| 553 | .flags = 0u, |
| 554 | }; |
| 555 | |
| 556 | return setBufferCollectionImageConstraintsFUCHSIA(enc, device, collection, &imageConstraints); |
| 557 | } |
| 558 | |
| 559 | VkResult addImageBufferCollectionConstraintsFUCHSIA( |
| 560 | VkEncoder* enc, VkDevice device, VkPhysicalDevice physicalDevice, |
| 561 | const VkImageFormatConstraintsInfoFUCHSIA* formatConstraints, // always non-zero |
| 562 | VkImageTiling tiling, fuchsia_sysmem::wire::BufferCollectionConstraints* constraints) { |
| 563 | // First check if the format, tiling and usage is supported on host. |
| 564 | VkImageFormatProperties imageFormatProperties; |
| 565 | auto createInfo = &formatConstraints->imageCreateInfo; |
| 566 | auto result = enc->vkGetPhysicalDeviceImageFormatProperties( |
| 567 | physicalDevice, createInfo->format, createInfo->imageType, tiling, createInfo->usage, |
| 568 | createInfo->flags, &imageFormatProperties, true /* do lock */); |
| 569 | if (result != VK_SUCCESS) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 570 | mesa_logd( |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 571 | "%s: Image format (%u) type (%u) tiling (%u) " |
| 572 | "usage (%u) flags (%u) not supported by physical " |
| 573 | "device", |
| 574 | __func__, static_cast<uint32_t>(createInfo->format), |
| 575 | static_cast<uint32_t>(createInfo->imageType), static_cast<uint32_t>(tiling), |
| 576 | static_cast<uint32_t>(createInfo->usage), static_cast<uint32_t>(createInfo->flags)); |
| 577 | return VK_ERROR_FORMAT_NOT_SUPPORTED; |
| 578 | } |
| 579 | |
| 580 | // Check if format constraints contains unsupported format features. |
| 581 | { |
| 582 | VkFormatProperties formatProperties; |
| 583 | enc->vkGetPhysicalDeviceFormatProperties(physicalDevice, createInfo->format, |
| 584 | &formatProperties, true /* do lock */); |
| 585 | |
| 586 | auto supportedFeatures = (tiling == VK_IMAGE_TILING_LINEAR) |
| 587 | ? formatProperties.linearTilingFeatures |
| 588 | : formatProperties.optimalTilingFeatures; |
| 589 | auto requiredFeatures = formatConstraints->requiredFormatFeatures; |
| 590 | if ((~supportedFeatures) & requiredFeatures) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 591 | mesa_logd( |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 592 | "%s: Host device support features for %s tiling: %08x, " |
| 593 | "required features: %08x, feature bits %08x missing", |
| 594 | __func__, tiling == VK_IMAGE_TILING_LINEAR ? "LINEAR" : "OPTIMAL", |
| 595 | static_cast<uint32_t>(requiredFeatures), static_cast<uint32_t>(supportedFeatures), |
| 596 | static_cast<uint32_t>((~supportedFeatures) & requiredFeatures)); |
Yilong Li | 4d0ee60 | 2021-01-28 01:56:33 -0800 | [diff] [blame] | 597 | return VK_ERROR_FORMAT_NOT_SUPPORTED; |
| 598 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 599 | } |
Yilong Li | 4d0ee60 | 2021-01-28 01:56:33 -0800 | [diff] [blame] | 600 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 601 | fuchsia_sysmem::wire::ImageFormatConstraints imageConstraints; |
| 602 | if (formatConstraints->sysmemPixelFormat != 0) { |
| 603 | auto pixelFormat = static_cast<fuchsia_sysmem::wire::PixelFormatType>( |
| 604 | formatConstraints->sysmemPixelFormat); |
| 605 | if (createInfo->format != VK_FORMAT_UNDEFINED && |
| 606 | !vkFormatMatchesSysmemFormat(createInfo->format, pixelFormat)) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 607 | mesa_logd("%s: VkFormat %u doesn't match sysmem pixelFormat %lu", __func__, |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 608 | static_cast<uint32_t>(createInfo->format), formatConstraints->sysmemPixelFormat); |
| 609 | return VK_ERROR_FORMAT_NOT_SUPPORTED; |
| 610 | } |
| 611 | imageConstraints.pixel_format.type = pixelFormat; |
| 612 | } else { |
| 613 | auto pixel_format = vkFormatTypeToSysmem(createInfo->format); |
| 614 | if (pixel_format == fuchsia_sysmem::wire::PixelFormatType::kInvalid) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 615 | mesa_logd("%s: Unsupported VkFormat %u", __func__, |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 616 | static_cast<uint32_t>(createInfo->format)); |
| 617 | return VK_ERROR_FORMAT_NOT_SUPPORTED; |
| 618 | } |
| 619 | imageConstraints.pixel_format.type = pixel_format; |
| 620 | } |
Yilong Li | 4d0ee60 | 2021-01-28 01:56:33 -0800 | [diff] [blame] | 621 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 622 | imageConstraints.color_spaces_count = formatConstraints->colorSpaceCount; |
| 623 | for (size_t i = 0; i < formatConstraints->colorSpaceCount; i++) { |
| 624 | imageConstraints.color_space[0].type = static_cast<fuchsia_sysmem::wire::ColorSpaceType>( |
| 625 | formatConstraints->pColorSpaces[i].colorSpace); |
| 626 | } |
| 627 | |
| 628 | // Get row alignment from host GPU. |
| 629 | VkDeviceSize offset = 0; |
| 630 | VkDeviceSize rowPitchAlignment = 1u; |
| 631 | |
| 632 | if (tiling == VK_IMAGE_TILING_LINEAR) { |
| 633 | VkImageCreateInfo createInfoDup = *createInfo; |
| 634 | createInfoDup.pNext = nullptr; |
| 635 | enc->vkGetLinearImageLayout2GOOGLE(device, &createInfoDup, &offset, &rowPitchAlignment, |
| 636 | true /* do lock */); |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 637 | mesa_logd( |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 638 | "vkGetLinearImageLayout2GOOGLE: format %d offset %lu " |
| 639 | "rowPitchAlignment = %lu", |
| 640 | (int)createInfo->format, offset, rowPitchAlignment); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 641 | } |
| 642 | |
| 643 | imageConstraints.min_coded_width = createInfo->extent.width; |
| 644 | imageConstraints.max_coded_width = 0xfffffff; |
| 645 | imageConstraints.min_coded_height = createInfo->extent.height; |
| 646 | imageConstraints.max_coded_height = 0xffffffff; |
| 647 | // The min_bytes_per_row can be calculated by sysmem using |
| 648 | // |min_coded_width|, |bytes_per_row_divisor| and color format. |
| 649 | imageConstraints.min_bytes_per_row = 0; |
| 650 | imageConstraints.max_bytes_per_row = 0xffffffff; |
| 651 | imageConstraints.max_coded_width_times_coded_height = 0xffffffff; |
| 652 | |
| 653 | imageConstraints.layers = 1; |
| 654 | imageConstraints.coded_width_divisor = 1; |
| 655 | imageConstraints.coded_height_divisor = 1; |
| 656 | imageConstraints.bytes_per_row_divisor = rowPitchAlignment; |
| 657 | imageConstraints.start_offset_divisor = 1; |
| 658 | imageConstraints.display_width_divisor = 1; |
| 659 | imageConstraints.display_height_divisor = 1; |
| 660 | imageConstraints.pixel_format.has_format_modifier = true; |
| 661 | imageConstraints.pixel_format.format_modifier.value = |
| 662 | (tiling == VK_IMAGE_TILING_LINEAR) |
| 663 | ? fuchsia_sysmem::wire::kFormatModifierLinear |
| 664 | : fuchsia_sysmem::wire::kFormatModifierGoogleGoldfishOptimal; |
| 665 | |
| 666 | constraints->image_format_constraints[constraints->image_format_constraints_count++] = |
| 667 | imageConstraints; |
| 668 | return VK_SUCCESS; |
| 669 | } |
| 670 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 671 | SetBufferCollectionBufferConstraintsResult setBufferCollectionBufferConstraintsImpl( |
| 672 | fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection, |
| 673 | const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) { |
| 674 | const auto& collection = *pCollection; |
| 675 | if (pBufferConstraintsInfo == nullptr) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 676 | mesa_loge( |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 677 | "setBufferCollectionBufferConstraints: " |
| 678 | "pBufferConstraintsInfo cannot be null."); |
| 679 | return {VK_ERROR_OUT_OF_DEVICE_MEMORY}; |
| 680 | } |
| 681 | |
| 682 | fuchsia_sysmem::wire::BufferCollectionConstraints constraints = |
| 683 | defaultBufferCollectionConstraints( |
| 684 | /* min_size_bytes */ pBufferConstraintsInfo->createInfo.size, |
| 685 | /* buffer_count */ pBufferConstraintsInfo->bufferCollectionConstraints.minBufferCount); |
| 686 | constraints.usage.vulkan = |
| 687 | getBufferCollectionConstraintsVulkanBufferUsage(pBufferConstraintsInfo); |
| 688 | |
| 689 | constexpr uint32_t kVulkanPriority = 5; |
| 690 | const char kName[] = "GoldfishBufferSysmemShared"; |
| 691 | collection->SetName(kVulkanPriority, fidl::StringView(kName)); |
| 692 | |
| 693 | auto result = collection->SetConstraints(true, constraints); |
| 694 | if (!result.ok()) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 695 | mesa_loge("setBufferCollectionConstraints: SetConstraints failed: %d", result.status()); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 696 | return {VK_ERROR_OUT_OF_DEVICE_MEMORY}; |
| 697 | } |
| 698 | |
| 699 | return {VK_SUCCESS, constraints}; |
| 700 | } |
| 701 | #endif |
| 702 | |
| 703 | uint64_t getAHardwareBufferId(AHardwareBuffer* ahw) { |
| 704 | uint64_t id = 0; |
Jason Macnak | abe57a8 | 2024-02-02 17:02:21 -0800 | [diff] [blame] | 705 | #if defined(ANDROID) |
| 706 | auto* gralloc = ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper(); |
| 707 | gralloc->getId(ahw, &id); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 708 | #else |
| 709 | (void)ahw; |
| 710 | #endif |
| 711 | return id; |
| 712 | } |
| 713 | |
| 714 | void transformExternalResourceMemoryDedicatedRequirementsForGuest( |
| 715 | VkMemoryDedicatedRequirements* dedicatedReqs) { |
| 716 | dedicatedReqs->prefersDedicatedAllocation = VK_TRUE; |
| 717 | dedicatedReqs->requiresDedicatedAllocation = VK_TRUE; |
| 718 | } |
| 719 | |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 720 | void ResourceTracker::transformImageMemoryRequirementsForGuestLocked(VkImage image, |
| 721 | VkMemoryRequirements* reqs) { |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 722 | #ifdef VK_USE_PLATFORM_FUCHSIA |
| 723 | auto it = info_VkImage.find(image); |
| 724 | if (it == info_VkImage.end()) return; |
| 725 | auto& info = it->second; |
| 726 | if (info.isSysmemBackedMemory) { |
| 727 | auto width = info.createInfo.extent.width; |
| 728 | auto height = info.createInfo.extent.height; |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 729 | reqs->size = width * height * 4; |
| 730 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 731 | #else |
| 732 | // Bypass "unused parameter" checks. |
| 733 | (void)image; |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 734 | (void)reqs; |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 735 | #endif |
| 736 | } |
| 737 | |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 738 | CoherentMemoryPtr ResourceTracker::freeCoherentMemoryLocked(VkDeviceMemory memory, |
| 739 | VkDeviceMemory_Info& info) { |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 740 | if (info.coherentMemory && info.ptr) { |
| 741 | if (info.coherentMemory->getDeviceMemory() != memory) { |
| 742 | delete_goldfish_VkDeviceMemory(memory); |
Yilong Li | 4d0ee60 | 2021-01-28 01:56:33 -0800 | [diff] [blame] | 743 | } |
| 744 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 745 | if (info.ptr) { |
| 746 | info.coherentMemory->release(info.ptr); |
| 747 | info.ptr = nullptr; |
| 748 | } |
| 749 | |
| 750 | return std::move(info.coherentMemory); |
| 751 | } |
| 752 | |
| 753 | return nullptr; |
| 754 | } |
| 755 | |
| 756 | VkResult createFence(VkDevice device, uint64_t hostFenceHandle, int64_t& osHandle) { |
| 757 | struct VirtGpuExecBuffer exec = {}; |
| 758 | struct gfxstreamCreateExportSyncVK exportSync = {}; |
| 759 | VirtGpuDevice* instance = VirtGpuDevice::getInstance(); |
| 760 | |
| 761 | uint64_t hostDeviceHandle = get_host_u64_VkDevice(device); |
| 762 | |
| 763 | exportSync.hdr.opCode = GFXSTREAM_CREATE_EXPORT_SYNC_VK; |
| 764 | exportSync.deviceHandleLo = (uint32_t)hostDeviceHandle; |
| 765 | exportSync.deviceHandleHi = (uint32_t)(hostDeviceHandle >> 32); |
| 766 | exportSync.fenceHandleLo = (uint32_t)hostFenceHandle; |
| 767 | exportSync.fenceHandleHi = (uint32_t)(hostFenceHandle >> 32); |
| 768 | |
| 769 | exec.command = static_cast<void*>(&exportSync); |
| 770 | exec.command_size = sizeof(exportSync); |
| 771 | exec.flags = kFenceOut | kRingIdx; |
| 772 | if (instance->execBuffer(exec, nullptr)) return VK_ERROR_OUT_OF_HOST_MEMORY; |
| 773 | |
| 774 | osHandle = exec.handle.osHandle; |
| 775 | return VK_SUCCESS; |
| 776 | } |
| 777 | |
| 778 | void collectAllPendingDescriptorSetsBottomUp(const std::vector<VkCommandBuffer>& workingSet, |
| 779 | std::unordered_set<VkDescriptorSet>& allDs) { |
| 780 | if (workingSet.empty()) return; |
| 781 | |
| 782 | std::vector<VkCommandBuffer> nextLevel; |
| 783 | for (auto commandBuffer : workingSet) { |
| 784 | struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer); |
| 785 | forAllObjects(cb->subObjects, [&nextLevel](void* secondary) { |
| 786 | nextLevel.push_back((VkCommandBuffer)secondary); |
| 787 | }); |
| 788 | } |
| 789 | |
| 790 | collectAllPendingDescriptorSetsBottomUp(nextLevel, allDs); |
| 791 | |
| 792 | for (auto cmdbuf : workingSet) { |
| 793 | struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(cmdbuf); |
| 794 | |
| 795 | if (!cb->userPtr) { |
| 796 | continue; // No descriptors to update. |
| 797 | } |
| 798 | |
| 799 | CommandBufferPendingDescriptorSets* pendingDescriptorSets = |
| 800 | (CommandBufferPendingDescriptorSets*)(cb->userPtr); |
| 801 | |
| 802 | if (pendingDescriptorSets->sets.empty()) { |
| 803 | continue; // No descriptors to update. |
| 804 | } |
| 805 | |
| 806 | allDs.insert(pendingDescriptorSets->sets.begin(), pendingDescriptorSets->sets.end()); |
| 807 | } |
| 808 | } |
| 809 | |
// Flushes all pending guest-side descriptor writes for |sets| to the host in
// one vkQueueCommitDescriptorSetUpdatesGOOGLE call on |queue|.
//
// |context| is the VkEncoder to use. For each set, this marshals its pool,
// layout, pool id, pending-allocation flag, and the index of its first write
// into parallel arrays, flattens all non-empty writes into a single
// VkWriteDescriptorSet array, and clears the guest-side write records.
// After the call, every set's allocationPending flag is cleared.
void commitDescriptorSetUpdates(void* context, VkQueue queue,
                                const std::unordered_set<VkDescriptorSet>& sets) {
    VkEncoder* enc = (VkEncoder*)context;

    // Parallel arrays consumed by vkQueueCommitDescriptorSetUpdatesGOOGLE;
    // descriptorSetWhichPool[k] indexes into |pools| for the k-th set.
    std::unordered_map<VkDescriptorPool, uint32_t> poolSet;
    std::vector<VkDescriptorPool> pools;
    std::vector<VkDescriptorSetLayout> setLayouts;
    std::vector<uint64_t> poolIds;
    std::vector<uint32_t> descriptorSetWhichPool;
    std::vector<uint32_t> pendingAllocations;
    std::vector<uint32_t> writeStartingIndices;
    std::vector<VkWriteDescriptorSet> writesForHost;

    uint32_t poolIndex = 0;
    uint32_t currentWriteIndex = 0;
    for (auto set : sets) {
        ReifiedDescriptorSet* reified = as_goldfish_VkDescriptorSet(set)->reified;
        VkDescriptorPool pool = reified->pool;
        VkDescriptorSetLayout setLayout = reified->setLayout;

        // Deduplicate pools: each distinct pool gets one slot in |pools|.
        auto it = poolSet.find(pool);
        if (it == poolSet.end()) {
            poolSet[pool] = poolIndex;
            descriptorSetWhichPool.push_back(poolIndex);
            pools.push_back(pool);
            ++poolIndex;
        } else {
            uint32_t savedPoolIndex = it->second;
            descriptorSetWhichPool.push_back(savedPoolIndex);
        }

        poolIds.push_back(reified->poolId);
        setLayouts.push_back(setLayout);
        pendingAllocations.push_back(reified->allocationPending ? 1 : 0);
        writeStartingIndices.push_back(currentWriteIndex);

        // allWrites is indexed by [binding][array element].
        auto& writes = reified->allWrites;

        for (size_t i = 0; i < writes.size(); ++i) {
            uint32_t binding = i;

            for (size_t j = 0; j < writes[i].size(); ++j) {
                auto& write = writes[i][j];

                if (write.type == DescriptorWriteType::Empty) continue;

                uint32_t dstArrayElement = 0;

                VkDescriptorImageInfo* imageInfo = nullptr;
                VkDescriptorBufferInfo* bufferInfo = nullptr;
                VkBufferView* bufferView = nullptr;

                // Pick the payload pointer matching the write's type; the
                // others stay null in the VkWriteDescriptorSet below.
                switch (write.type) {
                    case DescriptorWriteType::Empty:
                        break;
                    case DescriptorWriteType::ImageInfo:
                        dstArrayElement = j;
                        imageInfo = &write.imageInfo;
                        break;
                    case DescriptorWriteType::BufferInfo:
                        dstArrayElement = j;
                        bufferInfo = &write.bufferInfo;
                        break;
                    case DescriptorWriteType::BufferView:
                        dstArrayElement = j;
                        bufferView = &write.bufferView;
                        break;
                    case DescriptorWriteType::InlineUniformBlock:
                    case DescriptorWriteType::AccelerationStructure:
                        // TODO
                        mesa_loge(
                            "Encountered pending inline uniform block or acceleration structure "
                            "desc write, abort (NYI)\n");
                        abort();
                    default:
                        break;
                }

                // TODO: Combine multiple writes into one VkWriteDescriptorSet.
                VkWriteDescriptorSet forHost = {
                    VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                    0 /* TODO: inline uniform block */,
                    set,
                    binding,
                    dstArrayElement,
                    1,
                    write.descriptorType,
                    imageInfo,
                    bufferInfo,
                    bufferView,
                };

                writesForHost.push_back(forHost);
                ++currentWriteIndex;

                // Set it back to empty.
                write.type = DescriptorWriteType::Empty;
            }
        }
    }

    // Skip out if there's nothing to VkWriteDescriptorSet home about.
    if (writesForHost.empty()) {
        return;
    }

    enc->vkQueueCommitDescriptorSetUpdatesGOOGLE(
        queue, (uint32_t)pools.size(), pools.data(), (uint32_t)sets.size(), setLayouts.data(),
        poolIds.data(), descriptorSetWhichPool.data(), pendingAllocations.data(),
        writeStartingIndices.data(), (uint32_t)writesForHost.size(), writesForHost.data(),
        false /* no lock */);

    // If we got here, then we definitely serviced the allocations.
    for (auto set : sets) {
        ReifiedDescriptorSet* reified = as_goldfish_VkDescriptorSet(set)->reified;
        reified->allocationPending = false;
    }
}
| 928 | |
// Hands a command buffer over from the encoder that last used it to
// |currentEncoder|, synchronizing the host via a pair of sequence-numbered
// vkCommandBufferHostSyncGOOGLE calls. The order of operations (sync on the
// old encoder, flush it, then sync on the new encoder) is significant.
// Always returns 0.
uint32_t ResourceTracker::syncEncodersForCommandBuffer(VkCommandBuffer commandBuffer,
                                                       VkEncoder* currentEncoder) {
    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (!cb) return 0;

    auto lastEncoder = cb->lastUsedEncoder;

    // Fast path: this encoder already owns the command buffer.
    if (lastEncoder == currentEncoder) return 0;

    // The command buffer holds a reference on its current encoder.
    currentEncoder->incRef();

    cb->lastUsedEncoder = currentEncoder;

    // First encoder to touch this command buffer: no handoff needed.
    if (!lastEncoder) return 0;

    // Advance the sequence number by two: oldSeq + 1 is issued on the old
    // encoder (then flushed), oldSeq + 2 on the new one.
    auto oldSeq = cb->sequenceNumber;
    cb->sequenceNumber += 2;
    lastEncoder->vkCommandBufferHostSyncGOOGLE(commandBuffer, false, oldSeq + 1,
                                               true /* do lock */);
    lastEncoder->flush();
    currentEncoder->vkCommandBufferHostSyncGOOGLE(commandBuffer, true, oldSeq + 2,
                                                  true /* do lock */);

    // Drop the command buffer's reference on the old encoder; decRef()
    // returning true means the encoder was destroyed.
    if (lastEncoder->decRef()) {
        cb->lastUsedEncoder = nullptr;
    }
    return 0;
}
| 957 | |
| 958 | void addPendingDescriptorSets(VkCommandBuffer commandBuffer, uint32_t descriptorSetCount, |
| 959 | const VkDescriptorSet* pDescriptorSets) { |
| 960 | struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer); |
| 961 | |
| 962 | if (!cb->userPtr) { |
| 963 | CommandBufferPendingDescriptorSets* newPendingSets = new CommandBufferPendingDescriptorSets; |
| 964 | cb->userPtr = newPendingSets; |
| 965 | } |
| 966 | |
| 967 | CommandBufferPendingDescriptorSets* pendingSets = |
| 968 | (CommandBufferPendingDescriptorSets*)cb->userPtr; |
| 969 | |
| 970 | for (uint32_t i = 0; i < descriptorSetCount; ++i) { |
| 971 | pendingSets->sets.insert(pDescriptorSets[i]); |
| 972 | } |
| 973 | } |
| 974 | |
| 975 | void decDescriptorSetLayoutRef(void* context, VkDevice device, |
| 976 | VkDescriptorSetLayout descriptorSetLayout, |
| 977 | const VkAllocationCallbacks* pAllocator) { |
| 978 | if (!descriptorSetLayout) return; |
| 979 | |
| 980 | struct goldfish_VkDescriptorSetLayout* setLayout = |
| 981 | as_goldfish_VkDescriptorSetLayout(descriptorSetLayout); |
| 982 | |
| 983 | if (0 == --setLayout->layoutInfo->refcount) { |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 984 | VkEncoder* enc = (VkEncoder*)context; |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 985 | enc->vkDestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator, |
| 986 | true /* do lock */); |
| 987 | } |
| 988 | } |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 989 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 990 | void ResourceTracker::ensureSyncDeviceFd() { |
Gurchetan Singh | b7feebd | 2024-01-23 14:12:36 -0800 | [diff] [blame] | 991 | #if GFXSTREAM_ENABLE_GUEST_GOLDFISH |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 992 | if (mSyncDeviceFd >= 0) return; |
| 993 | mSyncDeviceFd = goldfish_sync_open(); |
| 994 | if (mSyncDeviceFd >= 0) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 995 | mesa_logd("%s: created sync device for current Vulkan process: %d\n", __func__, mSyncDeviceFd); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 996 | } else { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 997 | mesa_logd("%s: failed to create sync device for current Vulkan process\n", __func__); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 998 | } |
| 999 | #endif |
| 1000 | } |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 1001 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1002 | void ResourceTracker::unregister_VkInstance(VkInstance instance) { |
| 1003 | AutoLock<RecursiveLock> lock(mLock); |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 1004 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1005 | auto it = info_VkInstance.find(instance); |
| 1006 | if (it == info_VkInstance.end()) return; |
| 1007 | auto info = it->second; |
| 1008 | info_VkInstance.erase(instance); |
| 1009 | lock.unlock(); |
| 1010 | } |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 1011 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1012 | void ResourceTracker::unregister_VkDevice(VkDevice device) { |
| 1013 | AutoLock<RecursiveLock> lock(mLock); |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 1014 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1015 | auto it = info_VkDevice.find(device); |
| 1016 | if (it == info_VkDevice.end()) return; |
| 1017 | auto info = it->second; |
| 1018 | info_VkDevice.erase(device); |
| 1019 | lock.unlock(); |
| 1020 | } |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 1021 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1022 | void ResourceTracker::unregister_VkCommandPool(VkCommandPool pool) { |
| 1023 | if (!pool) return; |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 1024 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1025 | clearCommandPool(pool); |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 1026 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1027 | AutoLock<RecursiveLock> lock(mLock); |
| 1028 | info_VkCommandPool.erase(pool); |
| 1029 | } |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 1030 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1031 | void ResourceTracker::unregister_VkSampler(VkSampler sampler) { |
| 1032 | if (!sampler) return; |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 1033 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1034 | AutoLock<RecursiveLock> lock(mLock); |
| 1035 | info_VkSampler.erase(sampler); |
| 1036 | } |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 1037 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1038 | void ResourceTracker::unregister_VkCommandBuffer(VkCommandBuffer commandBuffer) { |
| 1039 | resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */, |
| 1040 | true /* also clear pending descriptor sets */); |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 1041 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1042 | struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer); |
| 1043 | if (!cb) return; |
| 1044 | if (cb->lastUsedEncoder) { |
| 1045 | cb->lastUsedEncoder->decRef(); |
| 1046 | } |
| 1047 | eraseObjects(&cb->subObjects); |
| 1048 | forAllObjects(cb->poolObjects, [cb](void* commandPool) { |
| 1049 | struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool((VkCommandPool)commandPool); |
| 1050 | eraseObject(&p->subObjects, (void*)cb); |
| 1051 | }); |
| 1052 | eraseObjects(&cb->poolObjects); |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 1053 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1054 | if (cb->userPtr) { |
| 1055 | CommandBufferPendingDescriptorSets* pendingSets = |
| 1056 | (CommandBufferPendingDescriptorSets*)cb->userPtr; |
| 1057 | delete pendingSets; |
| 1058 | } |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 1059 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1060 | AutoLock<RecursiveLock> lock(mLock); |
| 1061 | info_VkCommandBuffer.erase(commandBuffer); |
| 1062 | } |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 1063 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1064 | void ResourceTracker::unregister_VkQueue(VkQueue queue) { |
| 1065 | struct goldfish_VkQueue* q = as_goldfish_VkQueue(queue); |
| 1066 | if (!q) return; |
| 1067 | if (q->lastUsedEncoder) { |
| 1068 | q->lastUsedEncoder->decRef(); |
| 1069 | } |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 1070 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1071 | AutoLock<RecursiveLock> lock(mLock); |
| 1072 | info_VkQueue.erase(queue); |
| 1073 | } |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 1074 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1075 | void ResourceTracker::unregister_VkDeviceMemory(VkDeviceMemory mem) { |
| 1076 | AutoLock<RecursiveLock> lock(mLock); |
| 1077 | |
| 1078 | auto it = info_VkDeviceMemory.find(mem); |
| 1079 | if (it == info_VkDeviceMemory.end()) return; |
| 1080 | |
| 1081 | auto& memInfo = it->second; |
| 1082 | |
| 1083 | #ifdef VK_USE_PLATFORM_ANDROID_KHR |
| 1084 | if (memInfo.ahw) { |
| 1085 | auto* gralloc = |
| 1086 | ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper(); |
| 1087 | gralloc->release(memInfo.ahw); |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 1088 | } |
David Reveman | 5b7c584 | 2019-02-20 01:06:48 -0500 | [diff] [blame] | 1089 | #endif |
Lingfeng Yang | 9b82e33 | 2019-02-13 17:53:57 -0800 | [diff] [blame] | 1090 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1091 | if (memInfo.vmoHandle != ZX_HANDLE_INVALID) { |
| 1092 | zx_handle_close(memInfo.vmoHandle); |
| 1093 | } |
| 1094 | |
| 1095 | info_VkDeviceMemory.erase(mem); |
| 1096 | } |
| 1097 | |
| 1098 | void ResourceTracker::unregister_VkImage(VkImage img) { |
| 1099 | AutoLock<RecursiveLock> lock(mLock); |
| 1100 | |
| 1101 | auto it = info_VkImage.find(img); |
| 1102 | if (it == info_VkImage.end()) return; |
| 1103 | |
| 1104 | auto& imageInfo = it->second; |
| 1105 | |
| 1106 | info_VkImage.erase(img); |
| 1107 | } |
| 1108 | |
| 1109 | void ResourceTracker::unregister_VkBuffer(VkBuffer buf) { |
| 1110 | AutoLock<RecursiveLock> lock(mLock); |
| 1111 | |
| 1112 | auto it = info_VkBuffer.find(buf); |
| 1113 | if (it == info_VkBuffer.end()) return; |
| 1114 | |
| 1115 | info_VkBuffer.erase(buf); |
| 1116 | } |
| 1117 | |
// Tears down tracking state for a VkSemaphore, closing any external-handle
// payloads the semaphore exported (Zircon event on Fuchsia, sync fd on
// Android/Linux).
void ResourceTracker::unregister_VkSemaphore(VkSemaphore sem) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkSemaphore.find(sem);
    if (it == info_VkSemaphore.end()) return;

    auto& semInfo = it->second;

    // eventHandle is ZX_HANDLE_INVALID on non-Fuchsia builds, so this is a
    // no-op there.
    if (semInfo.eventHandle != ZX_HANDLE_INVALID) {
        zx_handle_close(semInfo.eventHandle);
    }

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    // syncFd is an optional<int>; treat "absent" the same as "no fd" (-1).
    if (semInfo.syncFd.value_or(-1) >= 0) {
        auto* syncHelper =
            ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
        syncHelper->close(semInfo.syncFd.value());
    }
#endif

    info_VkSemaphore.erase(sem);
}
| 1140 | |
// Frees the deep-copied descriptor update template data (entries plus the
// per-descriptor-kind index/payload arrays) and drops the tracking entry.
// Each array pair is guarded by its count: the arrays are only allocated
// when the corresponding count is nonzero.
void ResourceTracker::unregister_VkDescriptorUpdateTemplate(VkDescriptorUpdateTemplate templ) {
    AutoLock<RecursiveLock> lock(mLock);
    auto it = info_VkDescriptorUpdateTemplate.find(templ);
    if (it == info_VkDescriptorUpdateTemplate.end()) return;

    auto& info = it->second;
    if (info.templateEntryCount) delete[] info.templateEntries;
    if (info.imageInfoCount) {
        delete[] info.imageInfoIndices;
        delete[] info.imageInfos;
    }
    if (info.bufferInfoCount) {
        delete[] info.bufferInfoIndices;
        delete[] info.bufferInfos;
    }
    if (info.bufferViewCount) {
        delete[] info.bufferViewIndices;
        delete[] info.bufferViews;
    }
    info_VkDescriptorUpdateTemplate.erase(it);
}
| 1162 | |
// Tears down tracking state for a VkFence, closing the sync fd exported for
// external-fence interop on Android/Linux builds.
void ResourceTracker::unregister_VkFence(VkFence fence) {
    AutoLock<RecursiveLock> lock(mLock);
    auto it = info_VkFence.find(fence);
    if (it == info_VkFence.end()) return;

    auto& fenceInfo = it->second;
    // Silences the unused-variable warning on builds where the #if block
    // below is compiled out.
    (void)fenceInfo;

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    if (fenceInfo.syncFd >= 0) {
        auto* syncHelper =
            ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
        syncHelper->close(fenceInfo.syncFd);
    }
#endif

    info_VkFence.erase(fence);
}
| 1181 | |
#ifdef VK_USE_PLATFORM_FUCHSIA
// Drops the tracking entry for a Fuchsia sysmem buffer collection. No
// handle cleanup is needed here; the collection itself is destroyed via
// the Vulkan API.
void ResourceTracker::unregister_VkBufferCollectionFUCHSIA(VkBufferCollectionFUCHSIA collection) {
    AutoLock<RecursiveLock> lock(mLock);
    info_VkBufferCollectionFUCHSIA.erase(collection);
}
#endif
| 1188 | |
Jason Macnak | eea882a | 2023-11-15 17:29:11 -0800 | [diff] [blame] | 1189 | void ResourceTracker::unregister_VkDescriptorSet_locked(VkDescriptorSet set) { |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1190 | struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(set); |
| 1191 | delete ds->reified; |
| 1192 | info_VkDescriptorSet.erase(set); |
| 1193 | } |
| 1194 | |
| 1195 | void ResourceTracker::unregister_VkDescriptorSet(VkDescriptorSet set) { |
| 1196 | if (!set) return; |
| 1197 | |
| 1198 | AutoLock<RecursiveLock> lock(mLock); |
| 1199 | unregister_VkDescriptorSet_locked(set); |
| 1200 | } |
| 1201 | |
| 1202 | void ResourceTracker::unregister_VkDescriptorSetLayout(VkDescriptorSetLayout setLayout) { |
| 1203 | if (!setLayout) return; |
| 1204 | |
| 1205 | AutoLock<RecursiveLock> lock(mLock); |
| 1206 | delete as_goldfish_VkDescriptorSetLayout(setLayout)->layoutInfo; |
| 1207 | info_VkDescriptorSetLayout.erase(setLayout); |
| 1208 | } |
| 1209 | |
// Frees a batch of descriptor sets, distinguishing sets whose host-side
// allocation is still pending (guest-only; just unregister and delete the
// wrapper) from sets already allocated on the host (must be freed through
// the encoder so the host releases them too).
void ResourceTracker::freeDescriptorSetsIfHostAllocated(VkEncoder* enc, VkDevice device,
                                                        uint32_t descriptorSetCount,
                                                        const VkDescriptorSet* sets) {
    for (uint32_t i = 0; i < descriptorSetCount; ++i) {
        struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(sets[i]);
        if (ds->reified->allocationPending) {
            // Host never saw this set; purely guest-side cleanup.
            unregister_VkDescriptorSet(sets[i]);
            delete_goldfish_VkDescriptorSet(sets[i]);
        } else {
            enc->vkFreeDescriptorSets(device, ds->reified->pool, 1, &sets[i], false /* no lock */);
        }
    }
}
| 1223 | |
// Resets a descriptor pool's guest-side state: collects every set still
// allocated from the pool, then unregisters and deletes each wrapper. With
// batched descriptor set updates enabled, each set also holds a reference
// on its layout that must be released here.
void ResourceTracker::clearDescriptorPoolAndUnregisterDescriptorSets(void* context, VkDevice device,
                                                                     VkDescriptorPool pool) {
    std::vector<VkDescriptorSet> toClear =
        clearDescriptorPool(pool, mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate);

    for (auto set : toClear) {
        if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
            // Release the layout reference taken when the set was created.
            VkDescriptorSetLayout setLayout = as_goldfish_VkDescriptorSet(set)->reified->setLayout;
            decDescriptorSetLayoutRef(context, device, setLayout, nullptr);
        }
        unregister_VkDescriptorSet(set);
        delete_goldfish_VkDescriptorSet(set);
    }
}
| 1238 | |
| 1239 | void ResourceTracker::unregister_VkDescriptorPool(VkDescriptorPool pool) { |
| 1240 | if (!pool) return; |
| 1241 | |
| 1242 | AutoLock<RecursiveLock> lock(mLock); |
| 1243 | |
| 1244 | struct goldfish_VkDescriptorPool* dp = as_goldfish_VkDescriptorPool(pool); |
| 1245 | delete dp->allocInfo; |
| 1246 | |
| 1247 | info_VkDescriptorPool.erase(pool); |
| 1248 | } |
| 1249 | |
// Host-to-guest transform for device memory handles. Intentionally a no-op:
// the guest-to-host direction (deviceMemoryTransform_tohost) rewrites
// handles/offsets for coherent-memory virtualization, but nothing needs to
// be undone coming back. The casts keep the required signature free of
// unused-parameter warnings.
void ResourceTracker::deviceMemoryTransform_fromhost(VkDeviceMemory* memory, uint32_t memoryCount,
                                                     VkDeviceSize* offset, uint32_t offsetCount,
                                                     VkDeviceSize* size, uint32_t sizeCount,
                                                     uint32_t* typeIndex, uint32_t typeIndexCount,
                                                     uint32_t* typeBits, uint32_t typeBitsCount) {
    (void)memory;
    (void)memoryCount;
    (void)offset;
    (void)offsetCount;
    (void)size;
    (void)sizeCount;
    (void)typeIndex;
    (void)typeIndexCount;
    (void)typeBits;
    (void)typeBitsCount;
}
| 1266 | |
| 1267 | void ResourceTracker::transformImpl_VkExternalMemoryProperties_fromhost( |
| 1268 | VkExternalMemoryProperties* pProperties, uint32_t) { |
| 1269 | VkExternalMemoryHandleTypeFlags supportedHandleType = 0u; |
| 1270 | #ifdef VK_USE_PLATFORM_FUCHSIA |
| 1271 | supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA; |
| 1272 | #endif // VK_USE_PLATFORM_FUCHSIA |
| 1273 | #ifdef VK_USE_PLATFORM_ANDROID_KHR |
| 1274 | supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT | |
| 1275 | VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID; |
| 1276 | #endif // VK_USE_PLATFORM_ANDROID_KHR |
| 1277 | if (supportedHandleType) { |
| 1278 | pProperties->compatibleHandleTypes &= supportedHandleType; |
| 1279 | pProperties->exportFromImportedHandleTypes &= supportedHandleType; |
| 1280 | } |
| 1281 | } |
| 1282 | |
| 1283 | void ResourceTracker::setInstanceInfo(VkInstance instance, uint32_t enabledExtensionCount, |
| 1284 | const char* const* ppEnabledExtensionNames, |
| 1285 | uint32_t apiVersion) { |
| 1286 | AutoLock<RecursiveLock> lock(mLock); |
| 1287 | auto& info = info_VkInstance[instance]; |
| 1288 | info.highestApiVersion = apiVersion; |
| 1289 | |
| 1290 | if (!ppEnabledExtensionNames) return; |
| 1291 | |
| 1292 | for (uint32_t i = 0; i < enabledExtensionCount; ++i) { |
| 1293 | info.enabledExtensions.insert(ppEnabledExtensionNames[i]); |
| 1294 | } |
| 1295 | } |
| 1296 | |
// Records per-device state at vkCreateDevice time: the owning physical
// device, its properties/memory properties, the enabled extensions, and any
// device-memory-report callbacks found in the create-info pNext chain.
void ResourceTracker::setDeviceInfo(VkDevice device, VkPhysicalDevice physdev,
                                    VkPhysicalDeviceProperties props,
                                    VkPhysicalDeviceMemoryProperties memProps,
                                    uint32_t enabledExtensionCount,
                                    const char* const* ppEnabledExtensionNames, const void* pNext) {
    AutoLock<RecursiveLock> lock(mLock);
    auto& info = info_VkDevice[device];
    info.physdev = physdev;
    info.props = props;
    info.memProps = memProps;
    info.apiVersion = props.apiVersion;

    // Walk the pNext chain of the device create info, collecting every
    // VkDeviceDeviceMemoryReportCreateInfoEXT callback so allocations can be
    // reported to the application later.
    const VkBaseInStructure* extensionCreateInfo =
        reinterpret_cast<const VkBaseInStructure*>(pNext);
    while (extensionCreateInfo) {
        if (extensionCreateInfo->sType ==
            VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT) {
            auto deviceMemoryReportCreateInfo =
                reinterpret_cast<const VkDeviceDeviceMemoryReportCreateInfoEXT*>(
                    extensionCreateInfo);
            if (deviceMemoryReportCreateInfo->pfnUserCallback != nullptr) {
                info.deviceMemoryReportCallbacks.emplace_back(
                    deviceMemoryReportCreateInfo->pfnUserCallback,
                    deviceMemoryReportCreateInfo->pUserData);
            }
        }
        extensionCreateInfo = extensionCreateInfo->pNext;
    }

    if (!ppEnabledExtensionNames) return;

    for (uint32_t i = 0; i < enabledExtensionCount; ++i) {
        info.enabledExtensions.insert(ppEnabledExtensionNames[i]);
    }
}
| 1332 | |
// Records tracking info for a VkDeviceMemory allocation: owning device,
// size, host-mapped pointer (null if unmapped), memory type, and the
// platform backing (AHardwareBuffer on Android, Zircon VMO handle on
// Fuchsia, virtio-gpu blob resource where applicable).
void ResourceTracker::setDeviceMemoryInfo(VkDevice device, VkDeviceMemory memory,
                                          VkDeviceSize allocationSize, uint8_t* ptr,
                                          uint32_t memoryTypeIndex, AHardwareBuffer* ahw,
                                          bool imported, zx_handle_t vmoHandle,
                                          VirtGpuResourcePtr blobPtr) {
    AutoLock<RecursiveLock> lock(mLock);
    auto& info = info_VkDeviceMemory[memory];

    info.device = device;
    info.allocationSize = allocationSize;
    info.ptr = ptr;
    info.memoryTypeIndex = memoryTypeIndex;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    // ahw is only stored (and later released) on Android builds.
    info.ahw = ahw;
#endif
    info.imported = imported;
    info.vmoHandle = vmoHandle;
    info.blobPtr = blobPtr;
}
| 1352 | |
| 1353 | void ResourceTracker::setImageInfo(VkImage image, VkDevice device, |
| 1354 | const VkImageCreateInfo* pCreateInfo) { |
| 1355 | AutoLock<RecursiveLock> lock(mLock); |
| 1356 | auto& info = info_VkImage[image]; |
| 1357 | |
| 1358 | info.device = device; |
| 1359 | info.createInfo = *pCreateInfo; |
| 1360 | } |
| 1361 | |
| 1362 | uint8_t* ResourceTracker::getMappedPointer(VkDeviceMemory memory) { |
| 1363 | AutoLock<RecursiveLock> lock(mLock); |
| 1364 | const auto it = info_VkDeviceMemory.find(memory); |
| 1365 | if (it == info_VkDeviceMemory.end()) return nullptr; |
| 1366 | |
| 1367 | const auto& info = it->second; |
| 1368 | return info.ptr; |
| 1369 | } |
| 1370 | |
| 1371 | VkDeviceSize ResourceTracker::getMappedSize(VkDeviceMemory memory) { |
| 1372 | AutoLock<RecursiveLock> lock(mLock); |
| 1373 | const auto it = info_VkDeviceMemory.find(memory); |
| 1374 | if (it == info_VkDeviceMemory.end()) return 0; |
| 1375 | |
| 1376 | const auto& info = it->second; |
| 1377 | return info.allocationSize; |
| 1378 | } |
| 1379 | |
// Validates that a VkMappedMemoryRange refers to a known, host-mapped
// allocation and stays within its bounds.
//
// Returns false when the memory handle is untracked, the allocation has no
// host mapping, or [offset, offset + size) exceeds the allocation. A size
// of VK_WHOLE_SIZE means "from offset to the end of the allocation".
bool ResourceTracker::isValidMemoryRange(const VkMappedMemoryRange& range) const {
    AutoLock<RecursiveLock> lock(mLock);
    const auto it = info_VkDeviceMemory.find(range.memory);
    if (it == info_VkDeviceMemory.end()) return false;
    const auto& info = it->second;

    if (!info.ptr) return false;

    const VkDeviceSize offset = range.offset;
    const VkDeviceSize size = range.size;

    // Reject offsets past the end of the allocation up front.
    if (offset > info.allocationSize) return false;

    if (size == VK_WHOLE_SIZE) return true;

    // Equivalent to "offset + size <= allocationSize", but written so a huge
    // size cannot wrap around VkDeviceSize (uint64_t) and falsely validate.
    return size <= info.allocationSize - offset;
}
| 1397 | |
// Queries the virtio-gpu device capabilities and, for protocol-versioned
// (non-goldfish) hosts, seeds mFeatureInfo and the stream feature bits with
// everything virtio-gpu implies. Outputs whether the render control encoder
// should be bypassed via noRenderControlEnc.
void ResourceTracker::setupCaps(uint32_t& noRenderControlEnc) {
    VirtGpuDevice* instance = VirtGpuDevice::getInstance(kCapsetGfxStreamVulkan);
    mCaps = instance->getCaps();

    // Delete once goldfish Linux drivers are gone
    if (mCaps.vulkanCapset.protocolVersion == 0) {
        mCaps.vulkanCapset.colorBufferMemoryIndex = 0xFFFFFFFF;
    } else {
        // Don't query the render control encoder for features, since for virtio-gpu the
        // capabilities provide versioning. Set features to be unconditionally true, since
        // using virtio-gpu encompasses all prior goldfish features. mFeatureInfo should be
        // deprecated in favor of caps.

        mFeatureInfo.reset(new EmulatorFeatureInfo);

        mFeatureInfo->hasVulkanNullOptionalStrings = true;
        mFeatureInfo->hasVulkanIgnoredHandles = true;
        mFeatureInfo->hasVulkanShaderFloat16Int8 = true;
        mFeatureInfo->hasVulkanQueueSubmitWithCommands = true;
        mFeatureInfo->hasDeferredVulkanCommands = true;
        mFeatureInfo->hasVulkanAsyncQueueSubmit = true;
        mFeatureInfo->hasVulkanCreateResourcesWithRequirements = true;
        mFeatureInfo->hasVirtioGpuNext = true;
        mFeatureInfo->hasVirtioGpuNativeSync = true;
        mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate = true;
        mFeatureInfo->hasVulkanAsyncQsri = true;

        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT;
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT;
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_SHADER_FLOAT16_INT8_BIT;
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
    }

    noRenderControlEnc = mCaps.vulkanCapset.noRenderControlEnc;
}
| 1433 | |
// Copies the emulator feature info (first caller wins; later calls are
// no-ops) and performs feature-dependent one-time setup: the goldfish
// address space provider on Android, the control-device/sysmem FIDL
// connections on Fuchsia, and the wire-protocol stream feature bits.
void ResourceTracker::setupFeatures(const EmulatorFeatureInfo* features) {
    // Idempotent: ignore null input and ignore repeat calls once
    // mFeatureInfo has been populated (possibly already set by setupCaps()).
    if (!features || mFeatureInfo) return;
    mFeatureInfo.reset(new EmulatorFeatureInfo);
    *mFeatureInfo = *features;

#if defined(__ANDROID__)
    if (mFeatureInfo->hasDirectMem) {
        mGoldfishAddressSpaceBlockProvider.reset(
            new GoldfishAddressSpaceBlockProvider(GoldfishAddressSpaceSubdeviceType::NoSubdevice));
    }
#endif  // defined(__ANDROID__)

#ifdef VK_USE_PLATFORM_FUCHSIA
    if (mFeatureInfo->hasVulkan) {
        // Connect to the goldfish control device; Vulkan cannot work at all
        // without it, hence the abort() on failure.
        fidl::ClientEnd<fuchsia_hardware_goldfish::ControlDevice> channel{zx::channel(
            GetConnectToServiceFunction()("/loader-gpu-devices/class/goldfish-control/000"))};
        if (!channel) {
            mesa_loge("failed to open control device");
            abort();
        }
        mControlDevice =
            fidl::WireSyncClient<fuchsia_hardware_goldfish::ControlDevice>(std::move(channel));

        fidl::ClientEnd<fuchsia_sysmem::Allocator> sysmem_channel{
            zx::channel(GetConnectToServiceFunction()("/svc/fuchsia.sysmem.Allocator"))};
        if (!sysmem_channel) {
            mesa_loge("failed to open sysmem connection");
        }
        mSysmemAllocator =
            fidl::WireSyncClient<fuchsia_sysmem::Allocator>(std::move(sysmem_channel));
        // Tag the sysmem connection with this process's name and koid so
        // allocations are attributable in sysmem diagnostics.
        char name[ZX_MAX_NAME_LEN] = {};
        zx_object_get_property(zx_process_self(), ZX_PROP_NAME, name, sizeof(name));
        std::string client_name(name);
        client_name += "-goldfish";
        zx_info_handle_basic_t info;
        zx_object_get_info(zx_process_self(), ZX_INFO_HANDLE_BASIC, &info, sizeof(info), nullptr,
                           nullptr);
        mSysmemAllocator->SetDebugClientInfo(fidl::StringView::FromExternal(client_name),
                                             info.koid);
    }
#endif

    // Translate negotiated features into wire-protocol stream feature bits.
    if (mFeatureInfo->hasVulkanNullOptionalStrings) {
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT;
    }
    if (mFeatureInfo->hasVulkanIgnoredHandles) {
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT;
    }
    if (mFeatureInfo->hasVulkanShaderFloat16Int8) {
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_SHADER_FLOAT16_INT8_BIT;
    }
    if (mFeatureInfo->hasVulkanQueueSubmitWithCommands) {
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
    }
}
| 1489 | |
// Installs the process-wide callbacks used to reach the HostConnection
// (e.g. for gralloc/sync helpers during unregister paths).
void ResourceTracker::setThreadingCallbacks(const ResourceTracker::ThreadingCallbacks& callbacks) {
    ResourceTracker::threadingCallbacks = callbacks;
}
| 1493 | |
| 1494 | bool ResourceTracker::hostSupportsVulkan() const { |
| 1495 | if (!mFeatureInfo) return false; |
| 1496 | |
| 1497 | return mFeatureInfo->hasVulkan; |
| 1498 | } |
| 1499 | |
| 1500 | bool ResourceTracker::usingDirectMapping() const { return true; } |
| 1501 | |
| 1502 | uint32_t ResourceTracker::getStreamFeatures() const { return ResourceTracker::streamFeatureBits; } |
| 1503 | |
| 1504 | bool ResourceTracker::supportsDeferredCommands() const { |
| 1505 | if (!mFeatureInfo) return false; |
| 1506 | return mFeatureInfo->hasDeferredVulkanCommands; |
| 1507 | } |
| 1508 | |
| 1509 | bool ResourceTracker::supportsAsyncQueueSubmit() const { |
| 1510 | if (!mFeatureInfo) return false; |
| 1511 | return mFeatureInfo->hasVulkanAsyncQueueSubmit; |
| 1512 | } |
| 1513 | |
| 1514 | bool ResourceTracker::supportsCreateResourcesWithRequirements() const { |
| 1515 | if (!mFeatureInfo) return false; |
| 1516 | return mFeatureInfo->hasVulkanCreateResourcesWithRequirements; |
| 1517 | } |
| 1518 | |
// Returns the index of extName within mHostInstanceExtensions, or -1 when
// the host does not advertise that instance extension.
int ResourceTracker::getHostInstanceExtensionIndex(const std::string& extName) const {
    int i = 0;
    for (const auto& prop : mHostInstanceExtensions) {
        // Compare directly against the NUL-terminated extensionName array;
        // avoids constructing a temporary std::string per candidate.
        if (extName == prop.extensionName) {
            return i;
        }
        ++i;
    }
    return -1;
}
| 1529 | |
// Returns the index of extName within mHostDeviceExtensions, or -1 when the
// host does not advertise that device extension.
int ResourceTracker::getHostDeviceExtensionIndex(const std::string& extName) const {
    int i = 0;
    for (const auto& prop : mHostDeviceExtensions) {
        // Compare directly against the NUL-terminated extensionName array;
        // avoids constructing a temporary std::string per candidate.
        if (extName == prop.extensionName) {
            return i;
        }
        ++i;
    }
    return -1;
}
| 1540 | |
// Guest-to-host transform for device memory parameters. For allocations
// backed by virtualized coherent memory, rewrites each handle to the real
// host VkDeviceMemory, shifts offsets by the sub-allocation's base, and
// resolves VK_WHOLE_SIZE to the tracked allocation size. Entries without
// coherent backing are passed through unchanged.
void ResourceTracker::deviceMemoryTransform_tohost(VkDeviceMemory* memory, uint32_t memoryCount,
                                                   VkDeviceSize* offset, uint32_t offsetCount,
                                                   VkDeviceSize* size, uint32_t sizeCount,
                                                   uint32_t* typeIndex, uint32_t typeIndexCount,
                                                   uint32_t* typeBits, uint32_t typeBitsCount) {
    (void)memoryCount;
    (void)offsetCount;
    (void)sizeCount;
    (void)typeIndex;
    (void)typeIndexCount;
    (void)typeBits;
    (void)typeBitsCount;

    if (memory) {
        AutoLock<RecursiveLock> lock(mLock);

        for (uint32_t i = 0; i < memoryCount; ++i) {
            VkDeviceMemory mem = memory[i];

            auto it = info_VkDeviceMemory.find(mem);
            // NOTE(review): an unknown handle aborts the whole loop rather
            // than skipping this entry (`return`, not `continue`) — looks
            // intentional-but-surprising; confirm before changing.
            if (it == info_VkDeviceMemory.end()) return;

            const auto& info = it->second;

            if (!info.coherentMemory) continue;

            memory[i] = info.coherentMemory->getDeviceMemory();

            if (offset) {
                offset[i] = info.coherentMemoryOffset + offset[i];
            }

            if (size && size[i] == VK_WHOLE_SIZE) {
                size[i] = info.allocationSize;
            }

            // TODO
            (void)memory;
            (void)offset;
            (void)size;
        }
    }
}
| 1584 | |
// Determines the memory type index the host uses for color buffers by
// creating a throwaway 64x64 RGBA8 image, reading its memory requirements,
// and scanning the device-local memory types from highest index down (the
// host picks the last device-local index). Returns 0 on any failure.
uint32_t ResourceTracker::getColorBufferMemoryIndex(void* context, VkDevice device) {
    // Create test image to get the memory requirements
    VkEncoder* enc = (VkEncoder*)context;
    VkImageCreateInfo createInfo = {
        .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
        .imageType = VK_IMAGE_TYPE_2D,
        .format = VK_FORMAT_R8G8B8A8_UNORM,
        .extent = {64, 64, 1},
        .mipLevels = 1,
        .arrayLayers = 1,
        .samples = VK_SAMPLE_COUNT_1_BIT,
        .tiling = VK_IMAGE_TILING_OPTIMAL,
        .usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
                 VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                 VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT,
        .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
    };
    VkImage image = VK_NULL_HANDLE;
    VkResult res = enc->vkCreateImage(device, &createInfo, nullptr, &image, true /* do lock */);

    if (res != VK_SUCCESS) {
        return 0;
    }

    // The probe image is destroyed immediately; only memReqs is needed.
    VkMemoryRequirements memReqs;
    enc->vkGetImageMemoryRequirements(device, image, &memReqs, true /* do lock */);
    enc->vkDestroyImage(device, image, nullptr, true /* do lock */);

    const VkPhysicalDeviceMemoryProperties& memProps =
        getPhysicalDeviceMemoryProperties(context, device, VK_NULL_HANDLE);

    // Currently, host looks for the last index that has with memory
    // property type VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT
    VkMemoryPropertyFlags memoryProperty = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    for (int i = VK_MAX_MEMORY_TYPES - 1; i >= 0; --i) {
        if ((memReqs.memoryTypeBits & (1u << i)) &&
            (memProps.memoryTypes[i].propertyFlags & memoryProperty)) {
            return i;
        }
    }

    return 0;
}
| 1628 | |
// Implements vkEnumerateInstanceExtensionProperties for the guest: fetches
// (and caches) the host's instance extensions, filters them down to an
// allow-list the guest ICD actually supports, appends platform-mandated
// entries, and applies the spec's two-call count/fill protocol.
VkResult ResourceTracker::on_vkEnumerateInstanceExtensionProperties(
    void* context, VkResult, const char*, uint32_t* pPropertyCount,
    VkExtensionProperties* pProperties) {
    std::vector<const char*> allowedExtensionNames = {
        "VK_KHR_get_physical_device_properties2",
        "VK_KHR_sampler_ycbcr_conversion",
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
        "VK_KHR_external_semaphore_capabilities",
        "VK_KHR_external_memory_capabilities",
        "VK_KHR_external_fence_capabilities",
        "VK_EXT_debug_utils",
#endif
    };

    VkEncoder* enc = (VkEncoder*)context;

    // Only advertise a select set of extensions.
    if (mHostInstanceExtensions.empty()) {
        // First call: query and cache the host-side extension list.
        uint32_t hostPropCount = 0;
        enc->vkEnumerateInstanceExtensionProperties(nullptr, &hostPropCount, nullptr,
                                                    true /* do lock */);
        mHostInstanceExtensions.resize(hostPropCount);

        VkResult hostRes = enc->vkEnumerateInstanceExtensionProperties(
            nullptr, &hostPropCount, mHostInstanceExtensions.data(), true /* do lock */);

        if (hostRes != VK_SUCCESS) {
            return hostRes;
        }
    }

    std::vector<VkExtensionProperties> filteredExts;

    // Keep only allow-listed extensions that the host actually advertises.
    for (size_t i = 0; i < allowedExtensionNames.size(); ++i) {
        auto extIndex = getHostInstanceExtensionIndex(allowedExtensionNames[i]);
        if (extIndex != -1) {
            filteredExts.push_back(mHostInstanceExtensions[extIndex]);
        }
    }

    // Platform-mandated extensions, exposed regardless of host support.
    VkExtensionProperties anbExtProps[] = {
#ifdef VK_USE_PLATFORM_FUCHSIA
        {"VK_KHR_external_memory_capabilities", 1},
        {"VK_KHR_external_semaphore_capabilities", 1},
#endif
    };

    for (auto& anbExtProp : anbExtProps) {
        filteredExts.push_back(anbExtProp);
    }

    // Spec:
    //
    // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateInstanceExtensionProperties.html
    //
    // If pProperties is NULL, then the number of extensions properties
    // available is returned in pPropertyCount. Otherwise, pPropertyCount
    // must point to a variable set by the user to the number of elements
    // in the pProperties array, and on return the variable is overwritten
    // with the number of structures actually written to pProperties. If
    // pPropertyCount is less than the number of extension properties
    // available, at most pPropertyCount structures will be written. If
    // pPropertyCount is smaller than the number of extensions available,
    // VK_INCOMPLETE will be returned instead of VK_SUCCESS, to indicate
    // that not all the available properties were returned.
    //
    // pPropertyCount must be a valid pointer to a uint32_t value
    if (!pPropertyCount) return VK_ERROR_INITIALIZATION_FAILED;

    if (!pProperties) {
        *pPropertyCount = (uint32_t)filteredExts.size();
        return VK_SUCCESS;
    } else {
        auto actualExtensionCount = (uint32_t)filteredExts.size();
        if (*pPropertyCount > actualExtensionCount) {
            *pPropertyCount = actualExtensionCount;
        }

        for (uint32_t i = 0; i < *pPropertyCount; ++i) {
            pProperties[i] = filteredExts[i];
        }

        if (actualExtensionCount > *pPropertyCount) {
            return VK_INCOMPLETE;
        }

        return VK_SUCCESS;
    }
}
| 1718 | |
| 1719 | VkResult ResourceTracker::on_vkEnumerateDeviceExtensionProperties( |
| 1720 | void* context, VkResult, VkPhysicalDevice physdev, const char*, uint32_t* pPropertyCount, |
| 1721 | VkExtensionProperties* pProperties) { |
| 1722 | std::vector<const char*> allowedExtensionNames = { |
| 1723 | "VK_KHR_vulkan_memory_model", |
| 1724 | "VK_KHR_buffer_device_address", |
| 1725 | "VK_KHR_maintenance1", |
| 1726 | "VK_KHR_maintenance2", |
| 1727 | "VK_KHR_maintenance3", |
| 1728 | "VK_KHR_bind_memory2", |
| 1729 | "VK_KHR_dedicated_allocation", |
| 1730 | "VK_KHR_get_memory_requirements2", |
| 1731 | "VK_KHR_sampler_ycbcr_conversion", |
| 1732 | "VK_KHR_shader_float16_int8", |
| 1733 | // Timeline semaphores buggy in newer NVIDIA drivers |
| 1734 | // (vkWaitSemaphoresKHR causes further vkCommandBuffer dispatches to deadlock) |
| 1735 | #ifndef VK_USE_PLATFORM_ANDROID_KHR |
| 1736 | "VK_KHR_timeline_semaphore", |
| 1737 | #endif |
| 1738 | "VK_AMD_gpu_shader_half_float", |
| 1739 | "VK_NV_shader_subgroup_partitioned", |
| 1740 | "VK_KHR_shader_subgroup_extended_types", |
| 1741 | "VK_EXT_subgroup_size_control", |
| 1742 | "VK_EXT_provoking_vertex", |
| 1743 | "VK_EXT_line_rasterization", |
| 1744 | "VK_KHR_shader_terminate_invocation", |
| 1745 | "VK_EXT_transform_feedback", |
| 1746 | "VK_EXT_primitive_topology_list_restart", |
| 1747 | "VK_EXT_index_type_uint8", |
| 1748 | "VK_EXT_load_store_op_none", |
| 1749 | "VK_EXT_swapchain_colorspace", |
| 1750 | "VK_EXT_image_robustness", |
| 1751 | "VK_EXT_custom_border_color", |
| 1752 | "VK_EXT_shader_stencil_export", |
| 1753 | "VK_KHR_image_format_list", |
| 1754 | "VK_KHR_incremental_present", |
| 1755 | "VK_KHR_pipeline_executable_properties", |
| 1756 | "VK_EXT_queue_family_foreign", |
Igor Chernyshev | 5ef37b1 | 2023-12-19 15:58:51 -0800 | [diff] [blame] | 1757 | "VK_EXT_scalar_block_layout", |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1758 | "VK_KHR_descriptor_update_template", |
| 1759 | "VK_KHR_storage_buffer_storage_class", |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 1760 | "VK_EXT_depth_clip_enable", |
Igor Chernyshev | 5467671 | 2024-01-15 13:12:49 -0800 | [diff] [blame] | 1761 | "VK_KHR_create_renderpass2", |
Gurchetan Singh | 30c4bbc | 2024-06-13 14:59:51 +0000 | [diff] [blame] | 1762 | "VK_EXT_vertex_attribute_divisor", |
Aaron Germuth | 6bde771 | 2024-05-30 20:02:09 +0000 | [diff] [blame] | 1763 | "VK_EXT_host_query_reset", |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1764 | #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__) |
| 1765 | "VK_KHR_external_semaphore", |
| 1766 | "VK_KHR_external_semaphore_fd", |
| 1767 | // "VK_KHR_external_semaphore_win32", not exposed because it's translated to fd |
| 1768 | "VK_KHR_external_memory", |
| 1769 | "VK_KHR_external_fence", |
| 1770 | "VK_KHR_external_fence_fd", |
| 1771 | "VK_EXT_device_memory_report", |
| 1772 | #endif |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 1773 | #if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR) |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1774 | "VK_KHR_imageless_framebuffer", |
| 1775 | #endif |
Yahan Zhou | a1531d1 | 2023-10-04 15:28:19 -0700 | [diff] [blame] | 1776 | // Vulkan 1.3 |
| 1777 | "VK_KHR_synchronization2", |
| 1778 | "VK_EXT_private_data", |
Gurchetan Singh | 91db886 | 2024-06-21 09:43:55 -0700 | [diff] [blame] | 1779 | "VK_EXT_color_write_enable", |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1780 | }; |
| 1781 | |
| 1782 | VkEncoder* enc = (VkEncoder*)context; |
| 1783 | |
| 1784 | if (mHostDeviceExtensions.empty()) { |
| 1785 | uint32_t hostPropCount = 0; |
| 1786 | enc->vkEnumerateDeviceExtensionProperties(physdev, nullptr, &hostPropCount, nullptr, |
| 1787 | true /* do lock */); |
| 1788 | mHostDeviceExtensions.resize(hostPropCount); |
| 1789 | |
| 1790 | VkResult hostRes = enc->vkEnumerateDeviceExtensionProperties( |
| 1791 | physdev, nullptr, &hostPropCount, mHostDeviceExtensions.data(), true /* do lock */); |
| 1792 | |
| 1793 | if (hostRes != VK_SUCCESS) { |
| 1794 | return hostRes; |
| 1795 | } |
| 1796 | } |
| 1797 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1798 | std::vector<VkExtensionProperties> filteredExts; |
| 1799 | |
| 1800 | for (size_t i = 0; i < allowedExtensionNames.size(); ++i) { |
| 1801 | auto extIndex = getHostDeviceExtensionIndex(allowedExtensionNames[i]); |
| 1802 | if (extIndex != -1) { |
| 1803 | filteredExts.push_back(mHostDeviceExtensions[extIndex]); |
| 1804 | } |
| 1805 | } |
| 1806 | |
| 1807 | VkExtensionProperties anbExtProps[] = { |
| 1808 | #ifdef VK_USE_PLATFORM_ANDROID_KHR |
| 1809 | {"VK_ANDROID_native_buffer", 7}, |
| 1810 | #endif |
| 1811 | #ifdef VK_USE_PLATFORM_FUCHSIA |
| 1812 | {"VK_KHR_external_memory", 1}, |
| 1813 | {"VK_KHR_external_semaphore", 1}, |
| 1814 | {"VK_FUCHSIA_external_semaphore", 1}, |
| 1815 | #endif |
| 1816 | }; |
| 1817 | |
| 1818 | for (auto& anbExtProp : anbExtProps) { |
| 1819 | filteredExts.push_back(anbExtProp); |
| 1820 | } |
| 1821 | |
Gurchetan Singh | 3057f91 | 2024-04-29 18:07:14 -0700 | [diff] [blame] | 1822 | /* |
| 1823 | * GfxstreamEnd2EndVkTest::DeviceMemoryReport always assumes the memory report |
| 1824 | * extension is present. It's is filtered out when sent host side, since for a |
| 1825 | * virtual GPU this is quite difficult to implement. |
| 1826 | * |
| 1827 | * Mesa runtime checks physical device features. So if the test tries to enable |
| 1828 | * device level extension without it definitely existing, the test will fail. |
| 1829 | * |
| 1830 | * The test can also be modified to check VkPhysicalDeviceDeviceMemoryReportFeaturesEXT, |
| 1831 | * but that's more involved. Work around this by always advertising the extension. |
| 1832 | * Tracking bug: b/338270042 |
| 1833 | */ |
| 1834 | filteredExts.push_back(VkExtensionProperties{"VK_EXT_device_memory_report", 1}); |
| 1835 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1836 | #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__) |
| 1837 | bool hostSupportsExternalFenceFd = |
| 1838 | getHostDeviceExtensionIndex("VK_KHR_external_fence_fd") != -1; |
| 1839 | if (!hostSupportsExternalFenceFd) { |
| 1840 | filteredExts.push_back(VkExtensionProperties{"VK_KHR_external_fence_fd", 1}); |
| 1841 | } |
| 1842 | #endif |
| 1843 | |
| 1844 | #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__) |
Aaron Ruby | aef68fd | 2024-01-16 16:55:43 -0500 | [diff] [blame] | 1845 | bool hostHasPosixExternalSemaphore = |
| 1846 | getHostDeviceExtensionIndex("VK_KHR_external_semaphore_fd") != -1; |
| 1847 | if (!hostHasPosixExternalSemaphore) { |
| 1848 | // Always advertise posix external semaphore capabilities on Android/Linux. |
| 1849 | // SYNC_FD handles will always work, regardless of host support. Support |
| 1850 | // for non-sync, opaque FDs, depends on host driver support, but will |
| 1851 | // be handled accordingly by host. |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1852 | filteredExts.push_back(VkExtensionProperties{"VK_KHR_external_semaphore_fd", 1}); |
| 1853 | } |
| 1854 | #endif |
| 1855 | |
| 1856 | bool win32ExtMemAvailable = getHostDeviceExtensionIndex("VK_KHR_external_memory_win32") != -1; |
| 1857 | bool posixExtMemAvailable = getHostDeviceExtensionIndex("VK_KHR_external_memory_fd") != -1; |
Serdar Kocdemir | fa050d1 | 2024-06-26 22:34:31 +0100 | [diff] [blame] | 1858 | //TODO(b/349066492): this should check external_memory_metal extension when it's ready |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1859 | bool moltenVkExtAvailable = getHostDeviceExtensionIndex("VK_MVK_moltenvk") != -1; |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 1860 | bool qnxExtMemAvailable = |
| 1861 | getHostDeviceExtensionIndex("VK_QNX_external_memory_screen_buffer") != -1; |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1862 | |
| 1863 | bool hostHasExternalMemorySupport = |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 1864 | win32ExtMemAvailable || posixExtMemAvailable || moltenVkExtAvailable || qnxExtMemAvailable; |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1865 | |
| 1866 | if (hostHasExternalMemorySupport) { |
| 1867 | #ifdef VK_USE_PLATFORM_ANDROID_KHR |
| 1868 | filteredExts.push_back( |
| 1869 | VkExtensionProperties{"VK_ANDROID_external_memory_android_hardware_buffer", 7}); |
| 1870 | filteredExts.push_back(VkExtensionProperties{"VK_EXT_queue_family_foreign", 1}); |
| 1871 | #endif |
| 1872 | #ifdef VK_USE_PLATFORM_FUCHSIA |
| 1873 | filteredExts.push_back(VkExtensionProperties{"VK_FUCHSIA_external_memory", 1}); |
| 1874 | filteredExts.push_back(VkExtensionProperties{"VK_FUCHSIA_buffer_collection", 1}); |
| 1875 | #endif |
| 1876 | #if !defined(VK_USE_PLATFORM_ANDROID_KHR) && defined(__linux__) |
| 1877 | filteredExts.push_back(VkExtensionProperties{"VK_KHR_external_memory_fd", 1}); |
| 1878 | filteredExts.push_back(VkExtensionProperties{"VK_EXT_external_memory_dma_buf", 1}); |
| 1879 | #endif |
| 1880 | } |
| 1881 | |
Jason Macnak | d7c55fb | 2023-12-08 13:15:44 -0800 | [diff] [blame] | 1882 | // NOTE: the Vulkan Loader's trampoline functions will remove duplicates. This can lead |
| 1883 | // to lead errors if this function returns VK_SUCCESS with N elements (including a duplicate) |
| 1884 | // but the Vulkan Loader's trampoline function returns VK_INCOMPLETE with N-1 elements |
| 1885 | // (without the duplicate). |
| 1886 | std::sort(filteredExts.begin(), |
| 1887 | filteredExts.end(), |
| 1888 | [](const VkExtensionProperties& a, |
| 1889 | const VkExtensionProperties& b) { |
| 1890 | return strcmp(a.extensionName, b.extensionName) < 0; |
| 1891 | }); |
| 1892 | filteredExts.erase(std::unique(filteredExts.begin(), |
| 1893 | filteredExts.end(), |
| 1894 | [](const VkExtensionProperties& a, |
| 1895 | const VkExtensionProperties& b) { |
| 1896 | return strcmp(a.extensionName, b.extensionName) == 0; |
| 1897 | }), |
| 1898 | filteredExts.end()); |
| 1899 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 1900 | // Spec: |
| 1901 | // |
| 1902 | // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateDeviceExtensionProperties.html |
| 1903 | // |
| 1904 | // pPropertyCount is a pointer to an integer related to the number of |
| 1905 | // extension properties available or queried, and is treated in the |
| 1906 | // same fashion as the |
| 1907 | // vkEnumerateInstanceExtensionProperties::pPropertyCount parameter. |
| 1908 | // |
| 1909 | // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateInstanceExtensionProperties.html |
| 1910 | // |
| 1911 | // If pProperties is NULL, then the number of extensions properties |
| 1912 | // available is returned in pPropertyCount. Otherwise, pPropertyCount |
| 1913 | // must point to a variable set by the user to the number of elements |
| 1914 | // in the pProperties array, and on return the variable is overwritten |
| 1915 | // with the number of structures actually written to pProperties. If |
| 1916 | // pPropertyCount is less than the number of extension properties |
| 1917 | // available, at most pPropertyCount structures will be written. If |
| 1918 | // pPropertyCount is smaller than the number of extensions available, |
| 1919 | // VK_INCOMPLETE will be returned instead of VK_SUCCESS, to indicate |
| 1920 | // that not all the available properties were returned. |
| 1921 | // |
| 1922 | // pPropertyCount must be a valid pointer to a uint32_t value |
| 1923 | |
| 1924 | if (!pPropertyCount) return VK_ERROR_INITIALIZATION_FAILED; |
| 1925 | |
| 1926 | if (!pProperties) { |
| 1927 | *pPropertyCount = (uint32_t)filteredExts.size(); |
| 1928 | return VK_SUCCESS; |
| 1929 | } else { |
| 1930 | auto actualExtensionCount = (uint32_t)filteredExts.size(); |
| 1931 | if (*pPropertyCount > actualExtensionCount) { |
| 1932 | *pPropertyCount = actualExtensionCount; |
| 1933 | } |
| 1934 | |
| 1935 | for (uint32_t i = 0; i < *pPropertyCount; ++i) { |
| 1936 | pProperties[i] = filteredExts[i]; |
| 1937 | } |
| 1938 | |
| 1939 | if (actualExtensionCount > *pPropertyCount) { |
| 1940 | return VK_INCOMPLETE; |
| 1941 | } |
| 1942 | |
| 1943 | return VK_SUCCESS; |
| 1944 | } |
| 1945 | } |
| 1946 | |
// Enumerates physical devices: caches the full host device list in the
// instance info on first use, then serves the guest query using the spec's
// two-call idiom (count-only when pPhysicalDevices is null; otherwise fill up
// to *pPhysicalDeviceCount and return VK_INCOMPLETE if the list was larger).
VkResult ResourceTracker::on_vkEnumeratePhysicalDevices(void* context, VkResult,
                                                        VkInstance instance,
                                                        uint32_t* pPhysicalDeviceCount,
                                                        VkPhysicalDevice* pPhysicalDevices) {
    VkEncoder* enc = (VkEncoder*)context;

    if (!instance) return VK_ERROR_INITIALIZATION_FAILED;

    if (!pPhysicalDeviceCount) return VK_ERROR_INITIALIZATION_FAILED;

    AutoLock<RecursiveLock> lock(mLock);

    // When this function is called, we actually need to do two things:
    // - Get full information about physical devices from the host,
    // even if the guest did not ask for it
    // - Serve the guest query according to the spec:
    //
    // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumeratePhysicalDevices.html

    auto it = info_VkInstance.find(instance);

    if (it == info_VkInstance.end()) return VK_ERROR_INITIALIZATION_FAILED;

    auto& info = it->second;

    // Get the full host information here if it doesn't exist already.
    if (info.physicalDevices.empty()) {
        uint32_t hostPhysicalDeviceCount = 0;

        // mLock is released around each encoder round trip so it is not held
        // during the host call (the encoder is invoked with "no lock").
        // NOTE(review): `info` is a reference into info_VkInstance; this
        // relies on the entry not being erased while mLock is dropped —
        // confirm.
        lock.unlock();
        VkResult countRes = enc->vkEnumeratePhysicalDevices(instance, &hostPhysicalDeviceCount,
                                                            nullptr, false /* no lock */);
        lock.lock();

        if (countRes != VK_SUCCESS) {
            mesa_loge(
                "%s: failed: could not count host physical devices. "
                "Error %d\n",
                __func__, countRes);
            return countRes;
        }

        info.physicalDevices.resize(hostPhysicalDeviceCount);

        lock.unlock();
        VkResult enumRes = enc->vkEnumeratePhysicalDevices(
            instance, &hostPhysicalDeviceCount, info.physicalDevices.data(), false /* no lock */);
        lock.lock();

        if (enumRes != VK_SUCCESS) {
            mesa_loge(
                "%s: failed: could not retrieve host physical devices. "
                "Error %d\n",
                __func__, enumRes);
            return enumRes;
        }
    }

    // Serve the guest query according to the spec.
    //
    // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumeratePhysicalDevices.html
    //
    // If pPhysicalDevices is NULL, then the number of physical devices
    // available is returned in pPhysicalDeviceCount. Otherwise,
    // pPhysicalDeviceCount must point to a variable set by the user to the
    // number of elements in the pPhysicalDevices array, and on return the
    // variable is overwritten with the number of handles actually written
    // to pPhysicalDevices. If pPhysicalDeviceCount is less than the number
    // of physical devices available, at most pPhysicalDeviceCount
    // structures will be written. If pPhysicalDeviceCount is smaller than
    // the number of physical devices available, VK_INCOMPLETE will be
    // returned instead of VK_SUCCESS, to indicate that not all the
    // available physical devices were returned.

    if (!pPhysicalDevices) {
        *pPhysicalDeviceCount = (uint32_t)info.physicalDevices.size();
        return VK_SUCCESS;
    } else {
        uint32_t actualDeviceCount = (uint32_t)info.physicalDevices.size();
        uint32_t toWrite =
            actualDeviceCount < *pPhysicalDeviceCount ? actualDeviceCount : *pPhysicalDeviceCount;

        for (uint32_t i = 0; i < toWrite; ++i) {
            pPhysicalDevices[i] = info.physicalDevices[i];
        }

        *pPhysicalDeviceCount = toWrite;

        // *pPhysicalDeviceCount was just set to toWrite, so this checks
        // whether the caller-provided array was too small.
        if (actualDeviceCount > *pPhysicalDeviceCount) {
            return VK_INCOMPLETE;
        }

        return VK_SUCCESS;
    }
}
| 2042 | |
| 2043 | void ResourceTracker::on_vkGetPhysicalDeviceProperties(void*, VkPhysicalDevice, |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 2044 | VkPhysicalDeviceProperties* pProperties) { |
| 2045 | #if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR) |
| 2046 | if (pProperties) { |
| 2047 | if (VK_PHYSICAL_DEVICE_TYPE_CPU == pProperties->deviceType) { |
| 2048 | /* For Linux guest: Even if host driver reports DEVICE_TYPE_CPU, |
| 2049 | * override this to VIRTUAL_GPU, otherwise Linux DRM interfaces |
| 2050 | * will take unexpected code paths to deal with "software" driver |
| 2051 | */ |
| 2052 | pProperties->deviceType = VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU; |
| 2053 | } |
| 2054 | } |
| 2055 | #endif |
| 2056 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 2057 | |
| 2058 | void ResourceTracker::on_vkGetPhysicalDeviceFeatures2(void*, VkPhysicalDevice, |
| 2059 | VkPhysicalDeviceFeatures2* pFeatures) { |
| 2060 | if (pFeatures) { |
| 2061 | VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* memoryReportFeaturesEXT = |
| 2062 | vk_find_struct<VkPhysicalDeviceDeviceMemoryReportFeaturesEXT>(pFeatures); |
| 2063 | if (memoryReportFeaturesEXT) { |
| 2064 | memoryReportFeaturesEXT->deviceMemoryReport = VK_TRUE; |
| 2065 | } |
| 2066 | } |
| 2067 | } |
| 2068 | |
// KHR-suffixed alias; forwards to the core-named implementation.
void ResourceTracker::on_vkGetPhysicalDeviceFeatures2KHR(void* context,
                                                         VkPhysicalDevice physicalDevice,
                                                         VkPhysicalDeviceFeatures2* pFeatures) {
    on_vkGetPhysicalDeviceFeatures2(context, physicalDevice, pFeatures);
}
| 2074 | |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 2075 | void ResourceTracker::on_vkGetPhysicalDeviceProperties2(void* context, |
| 2076 | VkPhysicalDevice physicalDevice, |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 2077 | VkPhysicalDeviceProperties2* pProperties) { |
| 2078 | if (pProperties) { |
| 2079 | VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* memoryReportFeaturesEXT = |
| 2080 | vk_find_struct<VkPhysicalDeviceDeviceMemoryReportFeaturesEXT>(pProperties); |
| 2081 | if (memoryReportFeaturesEXT) { |
| 2082 | memoryReportFeaturesEXT->deviceMemoryReport = VK_TRUE; |
| 2083 | } |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 2084 | on_vkGetPhysicalDeviceProperties(context, physicalDevice, &pProperties->properties); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 2085 | } |
| 2086 | } |
| 2087 | |
// KHR-suffixed alias; forwards to the core-named implementation.
void ResourceTracker::on_vkGetPhysicalDeviceProperties2KHR(
    void* context, VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties) {
    on_vkGetPhysicalDeviceProperties2(context, physicalDevice, pProperties);
}
| 2092 | |
// Returns the memory properties gfxstream exposes for the physical device
// chosen at startup.
void ResourceTracker::on_vkGetPhysicalDeviceMemoryProperties(
    void* context, VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* out) {
    // gfxstream decides which physical device to expose to the guest on startup.
    // Otherwise, we would need a physical device to properties mapping.
    *out = getPhysicalDeviceMemoryProperties(context, VK_NULL_HANDLE, physicalDevice);
}
| 2099 | |
// 2-variant: fills only the core memoryProperties member.
// NOTE(review): any structs chained on out->pNext (e.g. memory budget) are
// left untouched — confirm that is intended.
void ResourceTracker::on_vkGetPhysicalDeviceMemoryProperties2(
    void*, VkPhysicalDevice physdev, VkPhysicalDeviceMemoryProperties2* out) {
    on_vkGetPhysicalDeviceMemoryProperties(nullptr, physdev, &out->memoryProperties);
}
| 2104 | |
| 2105 | void ResourceTracker::on_vkGetDeviceQueue(void*, VkDevice device, uint32_t, uint32_t, |
| 2106 | VkQueue* pQueue) { |
| 2107 | AutoLock<RecursiveLock> lock(mLock); |
| 2108 | info_VkQueue[*pQueue].device = device; |
| 2109 | } |
| 2110 | |
| 2111 | void ResourceTracker::on_vkGetDeviceQueue2(void*, VkDevice device, const VkDeviceQueueInfo2*, |
| 2112 | VkQueue* pQueue) { |
| 2113 | AutoLock<RecursiveLock> lock(mLock); |
| 2114 | info_VkQueue[*pQueue].device = device; |
| 2115 | } |
| 2116 | |
| 2117 | VkResult ResourceTracker::on_vkCreateInstance(void* context, VkResult input_result, |
| 2118 | const VkInstanceCreateInfo* createInfo, |
| 2119 | const VkAllocationCallbacks*, VkInstance* pInstance) { |
| 2120 | if (input_result != VK_SUCCESS) return input_result; |
| 2121 | |
| 2122 | VkEncoder* enc = (VkEncoder*)context; |
| 2123 | |
| 2124 | uint32_t apiVersion; |
| 2125 | VkResult enumInstanceVersionRes = |
| 2126 | enc->vkEnumerateInstanceVersion(&apiVersion, false /* no lock */); |
| 2127 | |
| 2128 | setInstanceInfo(*pInstance, createInfo->enabledExtensionCount, |
| 2129 | createInfo->ppEnabledExtensionNames, apiVersion); |
| 2130 | |
| 2131 | return input_result; |
| 2132 | } |
| 2133 | |
// Tracks a newly created VkDevice: snapshots the physical device's properties
// and memory properties from the host and records them (together with the
// enabled extensions and the create-info pNext chain) in the device info table.
VkResult ResourceTracker::on_vkCreateDevice(void* context, VkResult input_result,
                                            VkPhysicalDevice physicalDevice,
                                            const VkDeviceCreateInfo* pCreateInfo,
                                            const VkAllocationCallbacks*, VkDevice* pDevice) {
    // Nothing to track if device creation already failed.
    if (input_result != VK_SUCCESS) return input_result;

    VkEncoder* enc = (VkEncoder*)context;

    // Fetch host-side device description with the encoder directly
    // ("no lock": locking is not needed for these calls here).
    VkPhysicalDeviceProperties props;
    VkPhysicalDeviceMemoryProperties memProps;
    enc->vkGetPhysicalDeviceProperties(physicalDevice, &props, false /* no lock */);
    enc->vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProps, false /* no lock */);

    setDeviceInfo(*pDevice, physicalDevice, props, memProps, pCreateInfo->enabledExtensionCount,
                  pCreateInfo->ppEnabledExtensionNames, pCreateInfo->pNext);

    return input_result;
}
| 2152 | |
| 2153 | void ResourceTracker::on_vkDestroyDevice_pre(void* context, VkDevice device, |
| 2154 | const VkAllocationCallbacks*) { |
| 2155 | (void)context; |
| 2156 | AutoLock<RecursiveLock> lock(mLock); |
| 2157 | |
| 2158 | auto it = info_VkDevice.find(device); |
| 2159 | if (it == info_VkDevice.end()) return; |
| 2160 | |
| 2161 | for (auto itr = info_VkDeviceMemory.cbegin(); itr != info_VkDeviceMemory.cend();) { |
| 2162 | auto& memInfo = itr->second; |
| 2163 | if (memInfo.device == device) { |
| 2164 | itr = info_VkDeviceMemory.erase(itr); |
| 2165 | } else { |
| 2166 | itr++; |
| 2167 | } |
| 2168 | } |
| 2169 | } |
| 2170 | |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 2171 | #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__) |
| 2172 | void updateMemoryTypeBits(uint32_t* memoryTypeBits, uint32_t memoryIndex) { |
| 2173 | *memoryTypeBits = 1u << memoryIndex; |
| 2174 | } |
| 2175 | #endif |
| 2176 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 2177 | #ifdef VK_USE_PLATFORM_ANDROID_KHR |
| 2178 | |
// Queries import properties of an AHardwareBuffer
// (VK_ANDROID_external_memory_android_hardware_buffer), constraining the
// reported memoryTypeBits to the host's color-buffer memory index.
VkResult ResourceTracker::on_vkGetAndroidHardwareBufferPropertiesANDROID(
    void* context, VkResult, VkDevice device, const AHardwareBuffer* buffer,
    VkAndroidHardwareBufferPropertiesANDROID* pProperties) {
    auto grallocHelper =
        ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper();

    // Lazily resolve the color-buffer memory index when the capset carries the
    // 0xFFFFFFFF "unset" sentinel.
    // Delete once goldfish Linux drivers are gone
    if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
        mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
    }

    // Overwrites pProperties->memoryTypeBits with exactly this index's bit.
    updateMemoryTypeBits(&pProperties->memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);

    return getAndroidHardwareBufferPropertiesANDROID(grallocHelper, buffer, pProperties);
}
| 2194 | |
// Returns the AHardwareBuffer backing a VkDeviceMemory allocation, caching it
// in the tracked memory info
// (VK_ANDROID_external_memory_android_hardware_buffer).
VkResult ResourceTracker::on_vkGetMemoryAndroidHardwareBufferANDROID(
    void*, VkResult, VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo,
    struct AHardwareBuffer** pBuffer) {
    if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
    if (!pInfo->memory) return VK_ERROR_INITIALIZATION_FAILED;

    AutoLock<RecursiveLock> lock(mLock);

    // Both the device and the memory object must already be tracked.
    auto deviceIt = info_VkDevice.find(device);

    if (deviceIt == info_VkDevice.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto memoryIt = info_VkDeviceMemory.find(pInfo->memory);

    if (memoryIt == info_VkDeviceMemory.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto& info = memoryIt->second;

    // The helper populates info.ahw via the gralloc implementation.
    auto* gralloc = ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper();
    VkResult queryRes = getMemoryAndroidHardwareBufferANDROID(gralloc, &info.ahw);

    if (queryRes != VK_SUCCESS) return queryRes;

    *pBuffer = info.ahw;

    return queryRes;
}
| 2226 | #endif |
| 2227 | |
| 2228 | #ifdef VK_USE_PLATFORM_FUCHSIA |
// Exports the VMO backing a VkDeviceMemory as a zircon handle owned by the
// caller (VK_FUCHSIA_external_memory). Fails if the memory has no exportable
// VMO.
VkResult ResourceTracker::on_vkGetMemoryZirconHandleFUCHSIA(
    void*, VkResult, VkDevice device, const VkMemoryGetZirconHandleInfoFUCHSIA* pInfo,
    uint32_t* pHandle) {
    if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
    if (!pInfo->memory) return VK_ERROR_INITIALIZATION_FAILED;

    AutoLock<RecursiveLock> lock(mLock);

    // Both the device and the memory object must already be tracked.
    auto deviceIt = info_VkDevice.find(device);

    if (deviceIt == info_VkDevice.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto memoryIt = info_VkDeviceMemory.find(pInfo->memory);

    if (memoryIt == info_VkDeviceMemory.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto& info = memoryIt->second;

    if (info.vmoHandle == ZX_HANDLE_INVALID) {
        mesa_loge("%s: memory cannot be exported", __func__);
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Hand back a duplicate so the caller owns its own handle.
    // NOTE(review): the zx_handle_duplicate() result is unchecked; on failure
    // *pHandle stays ZX_HANDLE_INVALID yet VK_SUCCESS is still returned —
    // confirm this is intended.
    *pHandle = ZX_HANDLE_INVALID;
    zx_handle_duplicate(info.vmoHandle, ZX_RIGHT_SAME_RIGHTS, pHandle);
    return VK_SUCCESS;
}
| 2260 | |
// Reports which memory types a VMO imported with
// VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA can be bound to, based
// on the buffer's goldfish memory-property bits (device-local / host-visible).
VkResult ResourceTracker::on_vkGetMemoryZirconHandlePropertiesFUCHSIA(
    void*, VkResult, VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType,
    uint32_t handle, VkMemoryZirconHandlePropertiesFUCHSIA* pProperties) {
    using fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal;
    using fuchsia_hardware_goldfish::wire::kMemoryPropertyHostVisible;

    if (handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Reject handles that are not VMOs.
    zx_info_handle_basic_t handleInfo;
    zx_status_t status = zx::unowned_vmo(handle)->get_info(ZX_INFO_HANDLE_BASIC, &handleInfo,
                                                           sizeof(handleInfo), nullptr, nullptr);
    if (status != ZX_OK || handleInfo.type != ZX_OBJ_TYPE_VMO) {
        return VK_ERROR_INVALID_EXTERNAL_HANDLE;
    }

    AutoLock<RecursiveLock> lock(mLock);

    auto deviceIt = info_VkDevice.find(device);

    if (deviceIt == info_VkDevice.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto& info = deviceIt->second;

    // GetBufferHandleInfo consumes a handle, so query with a duplicate.
    zx::vmo vmo_dup;
    status = zx::unowned_vmo(handle)->duplicate(ZX_RIGHT_SAME_RIGHTS, &vmo_dup);
    if (status != ZX_OK) {
        mesa_loge("zx_handle_duplicate() error: %d", status);
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    uint32_t memoryProperty = 0u;

    auto result = mControlDevice->GetBufferHandleInfo(std::move(vmo_dup));
    if (!result.ok()) {
        mesa_loge("mControlDevice->GetBufferHandleInfo fatal error: epitaph: %d", result.status());
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    if (result.value().is_ok()) {
        memoryProperty = result.value().value()->info.memory_property();
    } else if (result.value().error_value() == ZX_ERR_NOT_FOUND) {
        // If a VMO is allocated while ColorBuffer/Buffer is not created,
        // it must be a device-local buffer, since for host-visible buffers,
        // ColorBuffer/Buffer is created at sysmem allocation time.
        memoryProperty = kMemoryPropertyDeviceLocal;
    } else {
        // Importing read-only host memory into the Vulkan driver should not
        // work, but it is not an error to try to do so. Returning a
        // VkMemoryZirconHandlePropertiesFUCHSIA with no available
        // memoryType bits should be enough for clients. See fxbug.dev/42098398
        // for other issues with this flow.
        mesa_logw("GetBufferHandleInfo failed: %d", result.value().error_value());
        pProperties->memoryTypeBits = 0;
        return VK_SUCCESS;
    }

    // Advertise every memory type whose property flags match the reported
    // memory property bits.
    pProperties->memoryTypeBits = 0;
    for (uint32_t i = 0; i < info.memProps.memoryTypeCount; ++i) {
        if (((memoryProperty & kMemoryPropertyDeviceLocal) &&
             (info.memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) ||
            ((memoryProperty & kMemoryPropertyHostVisible) &&
             (info.memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))) {
            pProperties->memoryTypeBits |= 1ull << i;
        }
    }
    return VK_SUCCESS;
}
| 2331 | |
| 2332 | zx_koid_t getEventKoid(zx_handle_t eventHandle) { |
| 2333 | if (eventHandle == ZX_HANDLE_INVALID) { |
| 2334 | return ZX_KOID_INVALID; |
| 2335 | } |
| 2336 | |
| 2337 | zx_info_handle_basic_t info; |
| 2338 | zx_status_t status = zx_object_get_info(eventHandle, ZX_INFO_HANDLE_BASIC, &info, sizeof(info), |
| 2339 | nullptr, nullptr); |
| 2340 | if (status != ZX_OK) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 2341 | mesa_loge("Cannot get object info of handle %u: %d", eventHandle, status); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 2342 | return ZX_KOID_INVALID; |
| 2343 | } |
| 2344 | return info.koid; |
| 2345 | } |
| 2346 | |
// Imports a zircon event into a VkSemaphore (VK_FUCHSIA_external_semaphore).
// Takes ownership of the incoming handle and closes any event previously
// imported into the same semaphore.
VkResult ResourceTracker::on_vkImportSemaphoreZirconHandleFUCHSIA(
    void*, VkResult, VkDevice device, const VkImportSemaphoreZirconHandleInfoFUCHSIA* pInfo) {
    if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
    if (!pInfo->semaphore) return VK_ERROR_INITIALIZATION_FAILED;

    AutoLock<RecursiveLock> lock(mLock);

    // Both the device and the semaphore must already be tracked.
    auto deviceIt = info_VkDevice.find(device);

    if (deviceIt == info_VkDevice.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto semaphoreIt = info_VkSemaphore.find(pInfo->semaphore);

    if (semaphoreIt == info_VkSemaphore.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto& info = semaphoreIt->second;

    // Release the previously imported event, if any.
    if (info.eventHandle != ZX_HANDLE_INVALID) {
        zx_handle_close(info.eventHandle);
    }
    // The struct member was renamed from `handle` to `zirconHandle` at Vulkan
    // header version 174.
#if VK_HEADER_VERSION < 174
    info.eventHandle = pInfo->handle;
#else // VK_HEADER_VERSION >= 174
    info.eventHandle = pInfo->zirconHandle;
#endif // VK_HEADER_VERSION < 174
    if (info.eventHandle != ZX_HANDLE_INVALID) {
        // Cache the koid so the imported event can be identified later.
        info.eventKoid = getEventKoid(info.eventHandle);
    }

    return VK_SUCCESS;
}
| 2382 | |
// Exports the zircon event backing a VkSemaphore as a new handle owned by the
// caller (VK_FUCHSIA_external_semaphore).
VkResult ResourceTracker::on_vkGetSemaphoreZirconHandleFUCHSIA(
    void*, VkResult, VkDevice device, const VkSemaphoreGetZirconHandleInfoFUCHSIA* pInfo,
    uint32_t* pHandle) {
    if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
    if (!pInfo->semaphore) return VK_ERROR_INITIALIZATION_FAILED;

    AutoLock<RecursiveLock> lock(mLock);

    // Both the device and the semaphore must already be tracked.
    auto deviceIt = info_VkDevice.find(device);

    if (deviceIt == info_VkDevice.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto semaphoreIt = info_VkSemaphore.find(pInfo->semaphore);

    if (semaphoreIt == info_VkSemaphore.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto& info = semaphoreIt->second;

    // Nothing to export if no event was ever imported/created.
    if (info.eventHandle == ZX_HANDLE_INVALID) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // NOTE(review): the zx_handle_duplicate() result is unchecked; on failure
    // *pHandle remains ZX_HANDLE_INVALID yet VK_SUCCESS is still returned —
    // confirm this is intended.
    *pHandle = ZX_HANDLE_INVALID;
    zx_handle_duplicate(info.eventHandle, ZX_RIGHT_SAME_RIGHTS, pHandle);
    return VK_SUCCESS;
}
| 2413 | |
// Creates a VkBufferCollectionFUCHSIA wrapping a sysmem BufferCollection wire
// client. If the caller supplied a collection token channel it is adopted;
// otherwise a fresh shared collection token is allocated from sysmem.
VkResult ResourceTracker::on_vkCreateBufferCollectionFUCHSIA(
    void*, VkResult, VkDevice, const VkBufferCollectionCreateInfoFUCHSIA* pInfo,
    const VkAllocationCallbacks*, VkBufferCollectionFUCHSIA* pCollection) {
    fidl::ClientEnd<::fuchsia_sysmem::BufferCollectionToken> token_client;

    if (pInfo->collectionToken) {
        // Adopt the caller-provided token channel.
        token_client = fidl::ClientEnd<::fuchsia_sysmem::BufferCollectionToken>(
            zx::channel(pInfo->collectionToken));
    } else {
        // No token supplied: allocate a new shared collection token.
        auto endpoints = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollectionToken>();
        if (!endpoints.is_ok()) {
            mesa_loge("zx_channel_create failed: %d", endpoints.status_value());
            return VK_ERROR_INITIALIZATION_FAILED;
        }

        auto result = mSysmemAllocator->AllocateSharedCollection(std::move(endpoints->server));
        if (!result.ok()) {
            mesa_loge("AllocateSharedCollection failed: %d", result.status());
            return VK_ERROR_INITIALIZATION_FAILED;
        }
        token_client = std::move(endpoints->client);
    }

    // Exchange the token for a BufferCollection channel.
    auto endpoints = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollection>();
    if (!endpoints.is_ok()) {
        mesa_loge("zx_channel_create failed: %d", endpoints.status_value());
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    auto [collection_client, collection_server] = std::move(endpoints.value());

    auto result = mSysmemAllocator->BindSharedCollection(std::move(token_client),
                                                         std::move(collection_server));
    if (!result.ok()) {
        mesa_loge("BindSharedCollection failed: %d", result.status());
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // The returned handle owns the heap-allocated wire client; it is freed in
    // on_vkDestroyBufferCollectionFUCHSIA.
    auto* sysmem_collection =
        new fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>(std::move(collection_client));
    *pCollection = reinterpret_cast<VkBufferCollectionFUCHSIA>(sysmem_collection);

    register_VkBufferCollectionFUCHSIA(*pCollection);
    return VK_SUCCESS;
}
| 2458 | |
| 2459 | void ResourceTracker::on_vkDestroyBufferCollectionFUCHSIA(void*, VkResult, VkDevice, |
| 2460 | VkBufferCollectionFUCHSIA collection, |
| 2461 | const VkAllocationCallbacks*) { |
| 2462 | auto sysmem_collection = |
| 2463 | reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection); |
| 2464 | if (sysmem_collection) { |
| 2465 | (*sysmem_collection)->Close(); |
| 2466 | } |
| 2467 | delete sysmem_collection; |
| 2468 | |
| 2469 | unregister_VkBufferCollectionFUCHSIA(collection); |
| 2470 | } |
| 2471 | |
// Translates a VkImageConstraintsInfoFUCHSIA into sysmem
// BufferCollectionConstraints and submits them to the collection via
// SetConstraints. For each format-constraints entry it probes both OPTIMAL
// and LINEAR tiling support on the host and records, in the returned
// createInfoIndex vector, which input entry produced each accepted sysmem
// image-format constraint (parallel arrays consumed later by
// getBufferCollectionImageCreateInfoIndexLocked).
//
// Returns {VK_SUCCESS, constraints, createInfoIndex} on success, or a result
// struct whose .result carries the failure code.
SetBufferCollectionImageConstraintsResult ResourceTracker::setBufferCollectionImageConstraintsImpl(
    VkEncoder* enc, VkDevice device,
    fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
    const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
    const auto& collection = *pCollection;
    if (!pImageConstraintsInfo ||
        pImageConstraintsInfo->sType != VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA) {
        mesa_loge("%s: invalid pImageConstraintsInfo", __func__);
        return {VK_ERROR_INITIALIZATION_FAILED};
    }

    // Valid usage requires at least one format constraint; treat violation
    // as a fatal programming error rather than a recoverable failure.
    if (pImageConstraintsInfo->formatConstraintsCount == 0) {
        mesa_loge("%s: formatConstraintsCount must be greater than 0", __func__);
        abort();
    }

    // Start from the default buffer-count constraints; image-format entries
    // and usage bits are accumulated into this struct below.
    fuchsia_sysmem::wire::BufferCollectionConstraints constraints =
        defaultBufferCollectionConstraints(
            /* min_size_bytes */ 0,
            pImageConstraintsInfo->bufferCollectionConstraints.minBufferCount,
            pImageConstraintsInfo->bufferCollectionConstraints.maxBufferCount,
            pImageConstraintsInfo->bufferCollectionConstraints.minBufferCountForCamping,
            pImageConstraintsInfo->bufferCollectionConstraints.minBufferCountForDedicatedSlack,
            pImageConstraintsInfo->bufferCollectionConstraints.minBufferCountForSharedSlack);

    std::vector<fuchsia_sysmem::wire::ImageFormatConstraints> format_constraints;

    // Snapshot the physical device under the lock; used for host-side format
    // support queries in the loop below.
    VkPhysicalDevice physicalDevice;
    {
        AutoLock<RecursiveLock> lock(mLock);
        auto deviceIt = info_VkDevice.find(device);
        if (deviceIt == info_VkDevice.end()) {
            return {VK_ERROR_INITIALIZATION_FAILED};
        }
        physicalDevice = deviceIt->second.physdev;
    }

    // createInfoIndex[i] records which pFormatConstraints entry produced the
    // i-th accepted sysmem image-format constraint.
    std::vector<uint32_t> createInfoIndex;

    bool hasOptimalTiling = false;
    for (uint32_t i = 0; i < pImageConstraintsInfo->formatConstraintsCount; i++) {
        const VkImageCreateInfo* createInfo =
            &pImageConstraintsInfo->pFormatConstraints[i].imageCreateInfo;
        const VkImageFormatConstraintsInfoFUCHSIA* formatConstraints =
            &pImageConstraintsInfo->pFormatConstraints[i];

        // add ImageFormatConstraints for *optimal* tiling
        VkResult optimalResult = VK_ERROR_FORMAT_NOT_SUPPORTED;
        if (createInfo->tiling == VK_IMAGE_TILING_OPTIMAL) {
            optimalResult = addImageBufferCollectionConstraintsFUCHSIA(
                enc, device, physicalDevice, formatConstraints, VK_IMAGE_TILING_OPTIMAL,
                &constraints);
            if (optimalResult == VK_SUCCESS) {
                createInfoIndex.push_back(i);
                hasOptimalTiling = true;
            }
        }

        // Add ImageFormatConstraints for *linear* tiling
        // (always attempted, even when the create info asked for optimal).
        VkResult linearResult = addImageBufferCollectionConstraintsFUCHSIA(
            enc, device, physicalDevice, formatConstraints, VK_IMAGE_TILING_LINEAR, &constraints);
        if (linearResult == VK_SUCCESS) {
            createInfoIndex.push_back(i);
        }

        // Update usage and BufferMemoryConstraints
        if (linearResult == VK_SUCCESS || optimalResult == VK_SUCCESS) {
            constraints.usage.vulkan |= getBufferCollectionConstraintsVulkanImageUsage(createInfo);

            if (formatConstraints && formatConstraints->flags) {
                mesa_logw(
                    "%s: Non-zero flags (%08x) in image format "
                    "constraints; this is currently not supported, see "
                    "fxbug.dev/42147900.",
                    __func__, formatConstraints->flags);
            }
        }
    }

    // Set buffer memory constraints based on optimal/linear tiling support
    // and flags.
    VkImageConstraintsInfoFlagsFUCHSIA flags = pImageConstraintsInfo->flags;
    if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA)
        constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageRead;
    if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA)
        constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageReadOften;
    if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA)
        constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageWrite;
    if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA)
        constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageWriteOften;

    constraints.has_buffer_memory_constraints = true;
    auto& memory_constraints = constraints.buffer_memory_constraints;
    memory_constraints.cpu_domain_supported = true;
    memory_constraints.ram_domain_supported = true;
    // The inaccessible (device-local) domain is only usable when at least one
    // format supports optimal tiling and the caller never requested CPU
    // access.
    memory_constraints.inaccessible_domain_supported =
        hasOptimalTiling && !(flags & (VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA |
                                       VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA |
                                       VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA |
                                       VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA)));

    if (memory_constraints.inaccessible_domain_supported) {
        memory_constraints.heap_permitted_count = 2;
        memory_constraints.heap_permitted[0] = fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
        memory_constraints.heap_permitted[1] = fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
    } else {
        memory_constraints.heap_permitted_count = 1;
        memory_constraints.heap_permitted[0] = fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
    }

    if (constraints.image_format_constraints_count == 0) {
        mesa_loge("%s: none of the specified formats is supported by device", __func__);
        return {VK_ERROR_FORMAT_NOT_SUPPORTED};
    }

    constexpr uint32_t kVulkanPriority = 5;
    const char kName[] = "GoldfishSysmemShared";
    collection->SetName(kVulkanPriority, fidl::StringView(kName));

    auto result = collection->SetConstraints(true, constraints);
    if (!result.ok()) {
        mesa_loge("setBufferCollectionConstraints: SetConstraints failed: %d", result.status());
        return {VK_ERROR_INITIALIZATION_FAILED};
    }

    return {VK_SUCCESS, constraints, std::move(createInfoIndex)};
}
| 2599 | |
| 2600 | VkResult ResourceTracker::setBufferCollectionImageConstraintsFUCHSIA( |
| 2601 | VkEncoder* enc, VkDevice device, |
| 2602 | fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection, |
| 2603 | const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) { |
| 2604 | const auto& collection = *pCollection; |
| 2605 | |
| 2606 | auto setConstraintsResult = |
| 2607 | setBufferCollectionImageConstraintsImpl(enc, device, pCollection, pImageConstraintsInfo); |
| 2608 | if (setConstraintsResult.result != VK_SUCCESS) { |
| 2609 | return setConstraintsResult.result; |
| 2610 | } |
| 2611 | |
| 2612 | // copy constraints to info_VkBufferCollectionFUCHSIA if |
| 2613 | // |collection| is a valid VkBufferCollectionFUCHSIA handle. |
| 2614 | AutoLock<RecursiveLock> lock(mLock); |
| 2615 | VkBufferCollectionFUCHSIA buffer_collection = |
| 2616 | reinterpret_cast<VkBufferCollectionFUCHSIA>(pCollection); |
| 2617 | if (info_VkBufferCollectionFUCHSIA.find(buffer_collection) != |
| 2618 | info_VkBufferCollectionFUCHSIA.end()) { |
| 2619 | info_VkBufferCollectionFUCHSIA[buffer_collection].constraints = |
| 2620 | gfxstream::guest::makeOptional(std::move(setConstraintsResult.constraints)); |
| 2621 | info_VkBufferCollectionFUCHSIA[buffer_collection].createInfoIndex = |
| 2622 | std::move(setConstraintsResult.createInfoIndex); |
| 2623 | } |
| 2624 | |
| 2625 | return VK_SUCCESS; |
| 2626 | } |
| 2627 | |
| 2628 | VkResult ResourceTracker::setBufferCollectionBufferConstraintsFUCHSIA( |
| 2629 | fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection, |
| 2630 | const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) { |
| 2631 | auto setConstraintsResult = |
| 2632 | setBufferCollectionBufferConstraintsImpl(pCollection, pBufferConstraintsInfo); |
| 2633 | if (setConstraintsResult.result != VK_SUCCESS) { |
| 2634 | return setConstraintsResult.result; |
| 2635 | } |
| 2636 | |
| 2637 | // copy constraints to info_VkBufferCollectionFUCHSIA if |
| 2638 | // |collection| is a valid VkBufferCollectionFUCHSIA handle. |
| 2639 | AutoLock<RecursiveLock> lock(mLock); |
| 2640 | VkBufferCollectionFUCHSIA buffer_collection = |
| 2641 | reinterpret_cast<VkBufferCollectionFUCHSIA>(pCollection); |
| 2642 | if (info_VkBufferCollectionFUCHSIA.find(buffer_collection) != |
| 2643 | info_VkBufferCollectionFUCHSIA.end()) { |
| 2644 | info_VkBufferCollectionFUCHSIA[buffer_collection].constraints = |
| 2645 | gfxstream::guest::makeOptional(setConstraintsResult.constraints); |
| 2646 | } |
| 2647 | |
| 2648 | return VK_SUCCESS; |
| 2649 | } |
| 2650 | |
| 2651 | VkResult ResourceTracker::on_vkSetBufferCollectionImageConstraintsFUCHSIA( |
| 2652 | void* context, VkResult, VkDevice device, VkBufferCollectionFUCHSIA collection, |
| 2653 | const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) { |
| 2654 | VkEncoder* enc = (VkEncoder*)context; |
| 2655 | auto sysmem_collection = |
| 2656 | reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection); |
| 2657 | return setBufferCollectionImageConstraintsFUCHSIA(enc, device, sysmem_collection, |
| 2658 | pImageConstraintsInfo); |
| 2659 | } |
| 2660 | |
| 2661 | VkResult ResourceTracker::on_vkSetBufferCollectionBufferConstraintsFUCHSIA( |
| 2662 | void*, VkResult, VkDevice, VkBufferCollectionFUCHSIA collection, |
| 2663 | const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) { |
| 2664 | auto sysmem_collection = |
| 2665 | reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection); |
| 2666 | return setBufferCollectionBufferConstraintsFUCHSIA(sysmem_collection, pBufferConstraintsInfo); |
| 2667 | } |
| 2668 | |
C Stout | 5a3a422 | 2023-11-14 16:31:56 -0800 | [diff] [blame] | 2669 | VkResult ResourceTracker::getBufferCollectionImageCreateInfoIndexLocked( |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 2670 | VkBufferCollectionFUCHSIA collection, fuchsia_sysmem::wire::BufferCollectionInfo2& info, |
| 2671 | uint32_t* outCreateInfoIndex) { |
| 2672 | if (!info_VkBufferCollectionFUCHSIA[collection].constraints.hasValue()) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 2673 | mesa_loge("%s: constraints not set", __func__); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 2674 | return VK_ERROR_OUT_OF_DEVICE_MEMORY; |
| 2675 | } |
| 2676 | |
| 2677 | if (!info.settings.has_image_format_constraints) { |
| 2678 | // no image format constraints, skip getting createInfoIndex. |
| 2679 | return VK_SUCCESS; |
| 2680 | } |
| 2681 | |
| 2682 | const auto& constraints = *info_VkBufferCollectionFUCHSIA[collection].constraints; |
| 2683 | const auto& createInfoIndices = info_VkBufferCollectionFUCHSIA[collection].createInfoIndex; |
| 2684 | const auto& out = info.settings.image_format_constraints; |
| 2685 | bool foundCreateInfo = false; |
| 2686 | |
| 2687 | for (size_t imageFormatIndex = 0; imageFormatIndex < constraints.image_format_constraints_count; |
| 2688 | imageFormatIndex++) { |
| 2689 | const auto& in = constraints.image_format_constraints[imageFormatIndex]; |
| 2690 | // These checks are sorted in order of how often they're expected to |
| 2691 | // mismatch, from most likely to least likely. They aren't always |
| 2692 | // equality comparisons, since sysmem may change some values in |
| 2693 | // compatible ways on behalf of the other participants. |
| 2694 | if ((out.pixel_format.type != in.pixel_format.type) || |
| 2695 | (out.pixel_format.has_format_modifier != in.pixel_format.has_format_modifier) || |
| 2696 | (out.pixel_format.format_modifier.value != in.pixel_format.format_modifier.value) || |
| 2697 | (out.min_bytes_per_row < in.min_bytes_per_row) || |
| 2698 | (out.required_max_coded_width < in.required_max_coded_width) || |
| 2699 | (out.required_max_coded_height < in.required_max_coded_height) || |
| 2700 | (in.bytes_per_row_divisor != 0 && |
| 2701 | out.bytes_per_row_divisor % in.bytes_per_row_divisor != 0)) { |
| 2702 | continue; |
| 2703 | } |
| 2704 | // Check if the out colorspaces are a subset of the in color spaces. |
| 2705 | bool all_color_spaces_found = true; |
| 2706 | for (uint32_t j = 0; j < out.color_spaces_count; j++) { |
| 2707 | bool found_matching_color_space = false; |
| 2708 | for (uint32_t k = 0; k < in.color_spaces_count; k++) { |
| 2709 | if (out.color_space[j].type == in.color_space[k].type) { |
| 2710 | found_matching_color_space = true; |
| 2711 | break; |
| 2712 | } |
| 2713 | } |
| 2714 | if (!found_matching_color_space) { |
| 2715 | all_color_spaces_found = false; |
| 2716 | break; |
| 2717 | } |
| 2718 | } |
| 2719 | if (!all_color_spaces_found) { |
| 2720 | continue; |
| 2721 | } |
| 2722 | |
| 2723 | // Choose the first valid format for now. |
| 2724 | *outCreateInfoIndex = createInfoIndices[imageFormatIndex]; |
| 2725 | return VK_SUCCESS; |
| 2726 | } |
| 2727 | |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 2728 | mesa_loge("%s: cannot find a valid image format in constraints", __func__); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 2729 | return VK_ERROR_OUT_OF_DEVICE_MEMORY; |
| 2730 | } |
| 2731 | |
| 2732 | VkResult ResourceTracker::on_vkGetBufferCollectionPropertiesFUCHSIA( |
| 2733 | void* context, VkResult, VkDevice device, VkBufferCollectionFUCHSIA collection, |
| 2734 | VkBufferCollectionPropertiesFUCHSIA* pProperties) { |
| 2735 | VkEncoder* enc = (VkEncoder*)context; |
| 2736 | const auto& sysmem_collection = |
| 2737 | *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection); |
| 2738 | |
| 2739 | auto result = sysmem_collection->WaitForBuffersAllocated(); |
| 2740 | if (!result.ok() || result->status != ZX_OK) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 2741 | mesa_loge("Failed wait for allocation: %d %d", result.status(), |
| 2742 | GET_STATUS_SAFE(result, status)); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 2743 | return VK_ERROR_INITIALIZATION_FAILED; |
| 2744 | } |
| 2745 | fuchsia_sysmem::wire::BufferCollectionInfo2 info = std::move(result->buffer_collection_info); |
| 2746 | |
| 2747 | bool is_host_visible = |
| 2748 | info.settings.buffer_settings.heap == fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible; |
| 2749 | bool is_device_local = |
| 2750 | info.settings.buffer_settings.heap == fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal; |
| 2751 | if (!is_host_visible && !is_device_local) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 2752 | mesa_loge("buffer collection uses a non-goldfish heap (type 0x%lu)", |
| 2753 | static_cast<uint64_t>(info.settings.buffer_settings.heap)); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 2754 | return VK_ERROR_INITIALIZATION_FAILED; |
| 2755 | } |
| 2756 | |
| 2757 | // memoryTypeBits |
| 2758 | // ==================================================================== |
| 2759 | { |
| 2760 | AutoLock<RecursiveLock> lock(mLock); |
| 2761 | auto deviceIt = info_VkDevice.find(device); |
| 2762 | if (deviceIt == info_VkDevice.end()) { |
| 2763 | return VK_ERROR_INITIALIZATION_FAILED; |
| 2764 | } |
| 2765 | auto& deviceInfo = deviceIt->second; |
| 2766 | |
| 2767 | // Device local memory type supported. |
| 2768 | pProperties->memoryTypeBits = 0; |
| 2769 | for (uint32_t i = 0; i < deviceInfo.memProps.memoryTypeCount; ++i) { |
| 2770 | if ((is_device_local && (deviceInfo.memProps.memoryTypes[i].propertyFlags & |
| 2771 | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) || |
| 2772 | (is_host_visible && (deviceInfo.memProps.memoryTypes[i].propertyFlags & |
| 2773 | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))) { |
| 2774 | pProperties->memoryTypeBits |= 1ull << i; |
| 2775 | } |
| 2776 | } |
| 2777 | } |
| 2778 | |
| 2779 | // bufferCount |
| 2780 | // ==================================================================== |
| 2781 | pProperties->bufferCount = info.buffer_count; |
| 2782 | |
| 2783 | auto storeProperties = [this, collection, pProperties]() -> VkResult { |
| 2784 | // store properties to storage |
| 2785 | AutoLock<RecursiveLock> lock(mLock); |
| 2786 | if (info_VkBufferCollectionFUCHSIA.find(collection) == |
| 2787 | info_VkBufferCollectionFUCHSIA.end()) { |
| 2788 | return VK_ERROR_OUT_OF_DEVICE_MEMORY; |
| 2789 | } |
| 2790 | |
| 2791 | info_VkBufferCollectionFUCHSIA[collection].properties = |
| 2792 | gfxstream::guest::makeOptional(*pProperties); |
| 2793 | |
| 2794 | // We only do a shallow copy so we should remove all pNext pointers. |
| 2795 | info_VkBufferCollectionFUCHSIA[collection].properties->pNext = nullptr; |
| 2796 | info_VkBufferCollectionFUCHSIA[collection].properties->sysmemColorSpaceIndex.pNext = |
| 2797 | nullptr; |
| 2798 | return VK_SUCCESS; |
| 2799 | }; |
| 2800 | |
| 2801 | // The fields below only apply to buffer collections with image formats. |
| 2802 | if (!info.settings.has_image_format_constraints) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 2803 | mesa_logd("%s: buffer collection doesn't have image format constraints", __func__); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 2804 | return storeProperties(); |
| 2805 | } |
| 2806 | |
| 2807 | // sysmemFormat |
| 2808 | // ==================================================================== |
| 2809 | |
| 2810 | pProperties->sysmemPixelFormat = |
| 2811 | static_cast<uint64_t>(info.settings.image_format_constraints.pixel_format.type); |
| 2812 | |
| 2813 | // colorSpace |
| 2814 | // ==================================================================== |
| 2815 | if (info.settings.image_format_constraints.color_spaces_count == 0) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 2816 | mesa_loge( |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 2817 | "%s: color space missing from allocated buffer collection " |
| 2818 | "constraints", |
| 2819 | __func__); |
| 2820 | return VK_ERROR_OUT_OF_DEVICE_MEMORY; |
| 2821 | } |
| 2822 | // Only report first colorspace for now. |
| 2823 | pProperties->sysmemColorSpaceIndex.colorSpace = |
| 2824 | static_cast<uint32_t>(info.settings.image_format_constraints.color_space[0].type); |
| 2825 | |
| 2826 | // createInfoIndex |
| 2827 | // ==================================================================== |
| 2828 | { |
| 2829 | AutoLock<RecursiveLock> lock(mLock); |
| 2830 | auto getIndexResult = getBufferCollectionImageCreateInfoIndexLocked( |
| 2831 | collection, info, &pProperties->createInfoIndex); |
| 2832 | if (getIndexResult != VK_SUCCESS) { |
| 2833 | return getIndexResult; |
| 2834 | } |
| 2835 | } |
| 2836 | |
| 2837 | // formatFeatures |
| 2838 | // ==================================================================== |
| 2839 | VkPhysicalDevice physicalDevice; |
| 2840 | { |
| 2841 | AutoLock<RecursiveLock> lock(mLock); |
| 2842 | auto deviceIt = info_VkDevice.find(device); |
| 2843 | if (deviceIt == info_VkDevice.end()) { |
| 2844 | return VK_ERROR_INITIALIZATION_FAILED; |
| 2845 | } |
| 2846 | physicalDevice = deviceIt->second.physdev; |
| 2847 | } |
| 2848 | |
| 2849 | VkFormat vkFormat = |
| 2850 | sysmemPixelFormatTypeToVk(info.settings.image_format_constraints.pixel_format.type); |
| 2851 | VkFormatProperties formatProperties; |
| 2852 | enc->vkGetPhysicalDeviceFormatProperties(physicalDevice, vkFormat, &formatProperties, |
| 2853 | true /* do lock */); |
| 2854 | if (is_device_local) { |
| 2855 | pProperties->formatFeatures = formatProperties.optimalTilingFeatures; |
| 2856 | } |
| 2857 | if (is_host_visible) { |
| 2858 | pProperties->formatFeatures = formatProperties.linearTilingFeatures; |
| 2859 | } |
| 2860 | |
| 2861 | // YCbCr properties |
| 2862 | // ==================================================================== |
| 2863 | // TODO(59804): Implement this correctly when we support YUV pixel |
| 2864 | // formats in goldfish ICD. |
| 2865 | pProperties->samplerYcbcrConversionComponents.r = VK_COMPONENT_SWIZZLE_IDENTITY; |
| 2866 | pProperties->samplerYcbcrConversionComponents.g = VK_COMPONENT_SWIZZLE_IDENTITY; |
| 2867 | pProperties->samplerYcbcrConversionComponents.b = VK_COMPONENT_SWIZZLE_IDENTITY; |
| 2868 | pProperties->samplerYcbcrConversionComponents.a = VK_COMPONENT_SWIZZLE_IDENTITY; |
| 2869 | pProperties->suggestedYcbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY; |
| 2870 | pProperties->suggestedYcbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL; |
| 2871 | pProperties->suggestedXChromaOffset = VK_CHROMA_LOCATION_MIDPOINT; |
| 2872 | pProperties->suggestedYChromaOffset = VK_CHROMA_LOCATION_MIDPOINT; |
| 2873 | |
| 2874 | return storeProperties(); |
| 2875 | } |
| 2876 | #endif |
| 2877 | |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 2878 | static uint32_t getVirglFormat(VkFormat vkFormat) { |
| 2879 | uint32_t virglFormat = 0; |
| 2880 | |
| 2881 | switch (vkFormat) { |
| 2882 | case VK_FORMAT_R8G8B8A8_SINT: |
| 2883 | case VK_FORMAT_R8G8B8A8_UNORM: |
| 2884 | case VK_FORMAT_R8G8B8A8_SRGB: |
| 2885 | case VK_FORMAT_R8G8B8A8_SNORM: |
| 2886 | case VK_FORMAT_R8G8B8A8_SSCALED: |
| 2887 | case VK_FORMAT_R8G8B8A8_USCALED: |
| 2888 | virglFormat = VIRGL_FORMAT_R8G8B8A8_UNORM; |
| 2889 | break; |
| 2890 | case VK_FORMAT_B8G8R8A8_SINT: |
| 2891 | case VK_FORMAT_B8G8R8A8_UNORM: |
| 2892 | case VK_FORMAT_B8G8R8A8_SRGB: |
| 2893 | case VK_FORMAT_B8G8R8A8_SNORM: |
| 2894 | case VK_FORMAT_B8G8R8A8_SSCALED: |
| 2895 | case VK_FORMAT_B8G8R8A8_USCALED: |
| 2896 | virglFormat = VIRGL_FORMAT_B8G8R8A8_UNORM; |
| 2897 | break; |
| 2898 | default: |
| 2899 | break; |
| 2900 | } |
| 2901 | |
| 2902 | return virglFormat; |
| 2903 | } |
| 2904 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 2905 | CoherentMemoryPtr ResourceTracker::createCoherentMemory( |
| 2906 | VkDevice device, VkDeviceMemory mem, const VkMemoryAllocateInfo& hostAllocationInfo, |
| 2907 | VkEncoder* enc, VkResult& res) { |
| 2908 | CoherentMemoryPtr coherentMemory = nullptr; |
| 2909 | |
| 2910 | #if defined(__ANDROID__) |
| 2911 | if (mFeatureInfo->hasDirectMem) { |
| 2912 | uint64_t gpuAddr = 0; |
| 2913 | GoldfishAddressSpaceBlockPtr block = nullptr; |
| 2914 | res = enc->vkMapMemoryIntoAddressSpaceGOOGLE(device, mem, &gpuAddr, true); |
| 2915 | if (res != VK_SUCCESS) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 2916 | mesa_loge( |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 2917 | "Failed to create coherent memory: vkMapMemoryIntoAddressSpaceGOOGLE " |
| 2918 | "returned:%d.", |
| 2919 | res); |
| 2920 | return coherentMemory; |
| 2921 | } |
| 2922 | { |
| 2923 | AutoLock<RecursiveLock> lock(mLock); |
| 2924 | auto it = info_VkDeviceMemory.find(mem); |
| 2925 | if (it == info_VkDeviceMemory.end()) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 2926 | mesa_loge("Failed to create coherent memory: failed to find device memory."); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 2927 | res = VK_ERROR_OUT_OF_HOST_MEMORY; |
Hailin zhang | ad3b3de | 2022-10-03 22:46:10 +0000 | [diff] [blame] | 2928 | return coherentMemory; |
| 2929 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 2930 | auto& info = it->second; |
| 2931 | block = info.goldfishBlock; |
| 2932 | info.goldfishBlock = nullptr; |
Lingfeng Yang | a963ea0 | 2019-03-21 21:27:04 -0700 | [diff] [blame] | 2933 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 2934 | coherentMemory = std::make_shared<CoherentMemory>( |
| 2935 | block, gpuAddr, hostAllocationInfo.allocationSize, device, mem); |
| 2936 | } |
| 2937 | } else |
| 2938 | #endif // defined(__ANDROID__) |
Jason Macnak | 650c0c0 | 2023-07-20 16:06:53 -0700 | [diff] [blame] | 2939 | if (mFeatureInfo->hasVirtioGpuNext) { |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 2940 | struct VirtGpuCreateBlob createBlob = {0}; |
Hailin zhang | ad3b3de | 2022-10-03 22:46:10 +0000 | [diff] [blame] | 2941 | uint64_t hvaSizeId[3]; |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 2942 | res = enc->vkGetMemoryHostAddressInfoGOOGLE(device, mem, &hvaSizeId[0], &hvaSizeId[1], |
| 2943 | &hvaSizeId[2], true /* do lock */); |
Jason Macnak | df2f074 | 2023-08-25 16:35:33 +0000 | [diff] [blame] | 2944 | if (res != VK_SUCCESS) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 2945 | mesa_loge( |
Jason Macnak | df2f074 | 2023-08-25 16:35:33 +0000 | [diff] [blame] | 2946 | "Failed to create coherent memory: vkMapMemoryIntoAddressSpaceGOOGLE " |
| 2947 | "returned:%d.", |
| 2948 | res); |
Hailin zhang | ad3b3de | 2022-10-03 22:46:10 +0000 | [diff] [blame] | 2949 | return coherentMemory; |
| 2950 | } |
| 2951 | { |
| 2952 | AutoLock<RecursiveLock> lock(mLock); |
Jason Macnak | 313357f | 2023-07-19 14:45:43 -0700 | [diff] [blame] | 2953 | VirtGpuDevice* instance = VirtGpuDevice::getInstance((enum VirtGpuCapset)3); |
Hailin zhang | ad3b3de | 2022-10-03 22:46:10 +0000 | [diff] [blame] | 2954 | createBlob.blobMem = kBlobMemHost3d; |
| 2955 | createBlob.flags = kBlobFlagMappable; |
| 2956 | createBlob.blobId = hvaSizeId[2]; |
| 2957 | createBlob.size = hostAllocationInfo.allocationSize; |
| 2958 | |
Jason Macnak | 313357f | 2023-07-19 14:45:43 -0700 | [diff] [blame] | 2959 | auto blob = instance->createBlob(createBlob); |
Hailin zhang | ad3b3de | 2022-10-03 22:46:10 +0000 | [diff] [blame] | 2960 | if (!blob) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 2961 | mesa_loge("Failed to create coherent memory: failed to create blob."); |
Hailin zhang | ad3b3de | 2022-10-03 22:46:10 +0000 | [diff] [blame] | 2962 | res = VK_ERROR_OUT_OF_DEVICE_MEMORY; |
| 2963 | return coherentMemory; |
| 2964 | } |
| 2965 | |
Jason Macnak | 6d3d7b2 | 2024-04-01 16:48:53 -0700 | [diff] [blame] | 2966 | VirtGpuResourceMappingPtr mapping = blob->createMapping(); |
Hailin zhang | ad3b3de | 2022-10-03 22:46:10 +0000 | [diff] [blame] | 2967 | if (!mapping) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 2968 | mesa_loge("Failed to create coherent memory: failed to create blob mapping."); |
Hailin zhang | ad3b3de | 2022-10-03 22:46:10 +0000 | [diff] [blame] | 2969 | res = VK_ERROR_OUT_OF_DEVICE_MEMORY; |
| 2970 | return coherentMemory; |
| 2971 | } |
| 2972 | |
| 2973 | coherentMemory = |
| 2974 | std::make_shared<CoherentMemory>(mapping, createBlob.size, device, mem); |
| 2975 | } |
| 2976 | } else { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 2977 | mesa_loge("FATAL: Unsupported virtual memory feature"); |
Hailin zhang | ad3b3de | 2022-10-03 22:46:10 +0000 | [diff] [blame] | 2978 | abort(); |
| 2979 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 2980 | return coherentMemory; |
| 2981 | } |
| 2982 | |
| 2983 | VkResult ResourceTracker::allocateCoherentMemory(VkDevice device, |
| 2984 | const VkMemoryAllocateInfo* pAllocateInfo, |
| 2985 | VkEncoder* enc, VkDeviceMemory* pMemory) { |
| 2986 | uint64_t blobId = 0; |
| 2987 | uint64_t offset = 0; |
| 2988 | uint8_t* ptr = nullptr; |
| 2989 | VkMemoryAllocateFlagsInfo allocFlagsInfo; |
| 2990 | VkMemoryOpaqueCaptureAddressAllocateInfo opaqueCaptureAddressAllocInfo; |
| 2991 | VkCreateBlobGOOGLE createBlobInfo; |
Jason Macnak | 6d3d7b2 | 2024-04-01 16:48:53 -0700 | [diff] [blame] | 2992 | VirtGpuResourcePtr guestBlob = nullptr; |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 2993 | |
| 2994 | memset(&createBlobInfo, 0, sizeof(struct VkCreateBlobGOOGLE)); |
| 2995 | createBlobInfo.sType = VK_STRUCTURE_TYPE_CREATE_BLOB_GOOGLE; |
| 2996 | |
| 2997 | const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr = |
| 2998 | vk_find_struct<VkMemoryAllocateFlagsInfo>(pAllocateInfo); |
| 2999 | const VkMemoryOpaqueCaptureAddressAllocateInfo* opaqueCaptureAddressAllocInfoPtr = |
| 3000 | vk_find_struct<VkMemoryOpaqueCaptureAddressAllocateInfo>(pAllocateInfo); |
| 3001 | |
| 3002 | bool deviceAddressMemoryAllocation = |
| 3003 | allocFlagsInfoPtr && |
| 3004 | ((allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT) || |
| 3005 | (allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT)); |
| 3006 | |
| 3007 | bool dedicated = deviceAddressMemoryAllocation; |
| 3008 | |
| 3009 | if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle]) |
| 3010 | dedicated = true; |
| 3011 | |
| 3012 | VkMemoryAllocateInfo hostAllocationInfo = vk_make_orphan_copy(*pAllocateInfo); |
| 3013 | vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&hostAllocationInfo); |
| 3014 | |
| 3015 | if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle]) { |
| 3016 | hostAllocationInfo.allocationSize = |
| 3017 | ALIGN(pAllocateInfo->allocationSize, mCaps.vulkanCapset.blobAlignment); |
| 3018 | } else if (dedicated) { |
| 3019 | // Over-aligning to kLargestSize to some Windows drivers (b:152769369). Can likely |
| 3020 | // have host report the desired alignment. |
| 3021 | hostAllocationInfo.allocationSize = ALIGN(pAllocateInfo->allocationSize, kLargestPageSize); |
| 3022 | } else { |
| 3023 | VkDeviceSize roundedUpAllocSize = ALIGN(pAllocateInfo->allocationSize, kMegaByte); |
| 3024 | hostAllocationInfo.allocationSize = std::max(roundedUpAllocSize, kDefaultHostMemBlockSize); |
Hailin zhang | ad3b3de | 2022-10-03 22:46:10 +0000 | [diff] [blame] | 3025 | } |
| 3026 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3027 | // Support device address capture/replay allocations |
| 3028 | if (deviceAddressMemoryAllocation) { |
Lingfeng Yang | bf019a6 | 2021-08-26 15:43:39 -0700 | [diff] [blame] | 3029 | if (allocFlagsInfoPtr) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3030 | mesa_logi("%s: has alloc flags\n", __func__); |
Lingfeng Yang | bf019a6 | 2021-08-26 15:43:39 -0700 | [diff] [blame] | 3031 | allocFlagsInfo = *allocFlagsInfoPtr; |
| 3032 | vk_append_struct(&structChainIter, &allocFlagsInfo); |
| 3033 | } |
| 3034 | |
| 3035 | if (opaqueCaptureAddressAllocInfoPtr) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3036 | mesa_logi("%s: has opaque capture address\n", __func__); |
Lingfeng Yang | bf019a6 | 2021-08-26 15:43:39 -0700 | [diff] [blame] | 3037 | opaqueCaptureAddressAllocInfo = *opaqueCaptureAddressAllocInfoPtr; |
| 3038 | vk_append_struct(&structChainIter, &opaqueCaptureAddressAllocInfo); |
| 3039 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3040 | } |
Lingfeng Yang | bf019a6 | 2021-08-26 15:43:39 -0700 | [diff] [blame] | 3041 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3042 | if (mCaps.params[kParamCreateGuestHandle]) { |
| 3043 | struct VirtGpuCreateBlob createBlob = {0}; |
| 3044 | struct VirtGpuExecBuffer exec = {}; |
| 3045 | VirtGpuDevice* instance = VirtGpuDevice::getInstance(); |
| 3046 | struct gfxstreamPlaceholderCommandVk placeholderCmd = {}; |
Lingfeng Yang | a4b97ac | 2019-02-19 15:50:26 -0800 | [diff] [blame] | 3047 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3048 | createBlobInfo.blobId = ++mBlobId; |
| 3049 | createBlobInfo.blobMem = kBlobMemGuest; |
| 3050 | createBlobInfo.blobFlags = kBlobFlagCreateGuestHandle; |
| 3051 | vk_append_struct(&structChainIter, &createBlobInfo); |
| 3052 | |
| 3053 | createBlob.blobMem = kBlobMemGuest; |
| 3054 | createBlob.flags = kBlobFlagCreateGuestHandle; |
| 3055 | createBlob.blobId = createBlobInfo.blobId; |
| 3056 | createBlob.size = hostAllocationInfo.allocationSize; |
| 3057 | |
| 3058 | guestBlob = instance->createBlob(createBlob); |
| 3059 | if (!guestBlob) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3060 | mesa_loge("Failed to allocate coherent memory: failed to create blob."); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3061 | return VK_ERROR_OUT_OF_DEVICE_MEMORY; |
| 3062 | } |
| 3063 | |
| 3064 | placeholderCmd.hdr.opCode = GFXSTREAM_PLACEHOLDER_COMMAND_VK; |
| 3065 | exec.command = static_cast<void*>(&placeholderCmd); |
| 3066 | exec.command_size = sizeof(placeholderCmd); |
| 3067 | exec.flags = kRingIdx; |
| 3068 | exec.ring_idx = 1; |
Tom Cherry | 772e6ac | 2023-04-12 13:38:29 -0700 | [diff] [blame] | 3069 | if (instance->execBuffer(exec, guestBlob.get())) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3070 | mesa_loge("Failed to allocate coherent memory: failed to execbuffer for wait."); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3071 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
| 3072 | } |
| 3073 | |
| 3074 | guestBlob->wait(); |
| 3075 | } else if (mCaps.vulkanCapset.deferredMapping) { |
| 3076 | createBlobInfo.blobId = ++mBlobId; |
| 3077 | createBlobInfo.blobMem = kBlobMemHost3d; |
| 3078 | vk_append_struct(&structChainIter, &createBlobInfo); |
| 3079 | } |
| 3080 | |
| 3081 | VkDeviceMemory mem = VK_NULL_HANDLE; |
| 3082 | VkResult host_res = |
| 3083 | enc->vkAllocateMemory(device, &hostAllocationInfo, nullptr, &mem, true /* do lock */); |
| 3084 | if (host_res != VK_SUCCESS) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3085 | mesa_loge("Failed to allocate coherent memory: failed to allocate on the host: %d.", |
| 3086 | host_res); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3087 | return host_res; |
| 3088 | } |
| 3089 | |
| 3090 | struct VkDeviceMemory_Info info; |
| 3091 | if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle]) { |
| 3092 | info.allocationSize = pAllocateInfo->allocationSize; |
| 3093 | info.blobId = createBlobInfo.blobId; |
| 3094 | } |
| 3095 | |
| 3096 | if (guestBlob) { |
| 3097 | auto mapping = guestBlob->createMapping(); |
| 3098 | if (!mapping) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3099 | mesa_loge("Failed to allocate coherent memory: failed to create blob mapping."); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3100 | return VK_ERROR_OUT_OF_DEVICE_MEMORY; |
| 3101 | } |
| 3102 | |
| 3103 | auto coherentMemory = std::make_shared<CoherentMemory>( |
| 3104 | mapping, hostAllocationInfo.allocationSize, device, mem); |
| 3105 | |
| 3106 | coherentMemory->subAllocate(pAllocateInfo->allocationSize, &ptr, offset); |
| 3107 | info.coherentMemoryOffset = offset; |
| 3108 | info.coherentMemory = coherentMemory; |
| 3109 | info.ptr = ptr; |
| 3110 | } |
| 3111 | |
| 3112 | info.coherentMemorySize = hostAllocationInfo.allocationSize; |
| 3113 | info.memoryTypeIndex = hostAllocationInfo.memoryTypeIndex; |
| 3114 | info.device = device; |
| 3115 | info.dedicated = dedicated; |
| 3116 | { |
| 3117 | // createCoherentMemory inside need to access info_VkDeviceMemory |
| 3118 | // information. set it before use. |
| 3119 | AutoLock<RecursiveLock> lock(mLock); |
| 3120 | info_VkDeviceMemory[mem] = info; |
| 3121 | } |
| 3122 | |
| 3123 | if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle]) { |
| 3124 | *pMemory = mem; |
| 3125 | return host_res; |
| 3126 | } |
| 3127 | |
| 3128 | auto coherentMemory = createCoherentMemory(device, mem, hostAllocationInfo, enc, host_res); |
| 3129 | if (coherentMemory) { |
| 3130 | AutoLock<RecursiveLock> lock(mLock); |
| 3131 | coherentMemory->subAllocate(pAllocateInfo->allocationSize, &ptr, offset); |
| 3132 | info.allocationSize = pAllocateInfo->allocationSize; |
| 3133 | info.coherentMemoryOffset = offset; |
| 3134 | info.coherentMemory = coherentMemory; |
| 3135 | info.ptr = ptr; |
| 3136 | info_VkDeviceMemory[mem] = info; |
| 3137 | *pMemory = mem; |
| 3138 | } else { |
| 3139 | enc->vkFreeMemory(device, mem, nullptr, true); |
| 3140 | AutoLock<RecursiveLock> lock(mLock); |
| 3141 | info_VkDeviceMemory.erase(mem); |
| 3142 | } |
| 3143 | return host_res; |
| 3144 | } |
| 3145 | |
| 3146 | VkResult ResourceTracker::getCoherentMemory(const VkMemoryAllocateInfo* pAllocateInfo, |
| 3147 | VkEncoder* enc, VkDevice device, |
| 3148 | VkDeviceMemory* pMemory) { |
| 3149 | VkMemoryAllocateFlagsInfo allocFlagsInfo; |
| 3150 | VkMemoryOpaqueCaptureAddressAllocateInfo opaqueCaptureAddressAllocInfo; |
| 3151 | |
| 3152 | // Add buffer device address capture structs |
| 3153 | const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr = |
| 3154 | vk_find_struct<VkMemoryAllocateFlagsInfo>(pAllocateInfo); |
| 3155 | |
| 3156 | bool dedicated = |
| 3157 | allocFlagsInfoPtr && |
| 3158 | ((allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT) || |
| 3159 | (allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT)); |
| 3160 | |
| 3161 | if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle]) |
| 3162 | dedicated = true; |
| 3163 | |
| 3164 | CoherentMemoryPtr coherentMemory = nullptr; |
| 3165 | uint8_t* ptr = nullptr; |
| 3166 | uint64_t offset = 0; |
| 3167 | { |
| 3168 | AutoLock<RecursiveLock> lock(mLock); |
| 3169 | for (const auto& [memory, info] : info_VkDeviceMemory) { |
Lars Harrison | 18a161a | 2024-02-03 02:18:23 +0000 | [diff] [blame] | 3170 | if (info.device != device) continue; |
| 3171 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3172 | if (info.memoryTypeIndex != pAllocateInfo->memoryTypeIndex) continue; |
| 3173 | |
| 3174 | if (info.dedicated || dedicated) continue; |
| 3175 | |
| 3176 | if (!info.coherentMemory) continue; |
| 3177 | |
| 3178 | if (!info.coherentMemory->subAllocate(pAllocateInfo->allocationSize, &ptr, offset)) |
| 3179 | continue; |
| 3180 | |
| 3181 | coherentMemory = info.coherentMemory; |
| 3182 | break; |
| 3183 | } |
| 3184 | if (coherentMemory) { |
| 3185 | struct VkDeviceMemory_Info info; |
| 3186 | info.coherentMemoryOffset = offset; |
| 3187 | info.ptr = ptr; |
| 3188 | info.memoryTypeIndex = pAllocateInfo->memoryTypeIndex; |
| 3189 | info.allocationSize = pAllocateInfo->allocationSize; |
| 3190 | info.coherentMemory = coherentMemory; |
| 3191 | info.device = device; |
| 3192 | |
| 3193 | // for suballocated memory, create an alias VkDeviceMemory handle for application |
| 3194 | // memory used for suballocations will still be VkDeviceMemory associated with |
| 3195 | // CoherentMemory |
| 3196 | auto mem = new_from_host_VkDeviceMemory(VK_NULL_HANDLE); |
| 3197 | info_VkDeviceMemory[mem] = info; |
| 3198 | *pMemory = mem; |
| 3199 | return VK_SUCCESS; |
| 3200 | } |
| 3201 | } |
| 3202 | return allocateCoherentMemory(device, pAllocateInfo, enc, pMemory); |
| 3203 | } |
| 3204 | |
| 3205 | VkResult ResourceTracker::on_vkAllocateMemory(void* context, VkResult input_result, VkDevice device, |
| 3206 | const VkMemoryAllocateInfo* pAllocateInfo, |
| 3207 | const VkAllocationCallbacks* pAllocator, |
| 3208 | VkDeviceMemory* pMemory) { |
| 3209 | #define _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(result) \ |
| 3210 | { \ |
| 3211 | auto it = info_VkDevice.find(device); \ |
| 3212 | if (it == info_VkDevice.end()) return result; \ |
| 3213 | emitDeviceMemoryReport(it->second, \ |
| 3214 | VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT, 0, \ |
| 3215 | pAllocateInfo->allocationSize, VK_OBJECT_TYPE_DEVICE_MEMORY, 0, \ |
| 3216 | pAllocateInfo->memoryTypeIndex); \ |
| 3217 | return result; \ |
| 3218 | } |
| 3219 | |
| 3220 | #define _RETURN_SCUCCESS_WITH_DEVICE_MEMORY_REPORT \ |
| 3221 | { \ |
| 3222 | uint64_t memoryObjectId = (uint64_t)(void*)*pMemory; \ |
| 3223 | if (ahw) { \ |
| 3224 | memoryObjectId = getAHardwareBufferId(ahw); \ |
| 3225 | } \ |
| 3226 | emitDeviceMemoryReport(info_VkDevice[device], \ |
| 3227 | isImport ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT \ |
| 3228 | : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT, \ |
| 3229 | memoryObjectId, pAllocateInfo->allocationSize, \ |
| 3230 | VK_OBJECT_TYPE_DEVICE_MEMORY, (uint64_t)(void*)*pMemory, \ |
| 3231 | pAllocateInfo->memoryTypeIndex); \ |
| 3232 | return VK_SUCCESS; \ |
| 3233 | } |
| 3234 | |
| 3235 | if (input_result != VK_SUCCESS) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(input_result); |
| 3236 | |
| 3237 | VkEncoder* enc = (VkEncoder*)context; |
| 3238 | |
Gurchetan Singh | 46aac19 | 2024-06-26 16:31:32 -0700 | [diff] [blame] | 3239 | bool hasDedicatedImage = false; |
| 3240 | bool hasDedicatedBuffer = false; |
| 3241 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3242 | VkMemoryAllocateInfo finalAllocInfo = vk_make_orphan_copy(*pAllocateInfo); |
| 3243 | vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&finalAllocInfo); |
| 3244 | |
| 3245 | VkMemoryAllocateFlagsInfo allocFlagsInfo; |
| 3246 | VkMemoryOpaqueCaptureAddressAllocateInfo opaqueCaptureAddressAllocInfo; |
| 3247 | |
| 3248 | // Add buffer device address capture structs |
| 3249 | const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr = |
| 3250 | vk_find_struct<VkMemoryAllocateFlagsInfo>(pAllocateInfo); |
| 3251 | const VkMemoryOpaqueCaptureAddressAllocateInfo* opaqueCaptureAddressAllocInfoPtr = |
| 3252 | vk_find_struct<VkMemoryOpaqueCaptureAddressAllocateInfo>(pAllocateInfo); |
| 3253 | |
| 3254 | if (allocFlagsInfoPtr) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3255 | mesa_logi("%s: has alloc flags\n", __func__); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3256 | allocFlagsInfo = *allocFlagsInfoPtr; |
| 3257 | vk_append_struct(&structChainIter, &allocFlagsInfo); |
| 3258 | } |
| 3259 | |
| 3260 | if (opaqueCaptureAddressAllocInfoPtr) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3261 | mesa_logi("%s: has opaque capture address\n", __func__); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3262 | opaqueCaptureAddressAllocInfo = *opaqueCaptureAddressAllocInfoPtr; |
| 3263 | vk_append_struct(&structChainIter, &opaqueCaptureAddressAllocInfo); |
| 3264 | } |
| 3265 | |
| 3266 | VkMemoryDedicatedAllocateInfo dedicatedAllocInfo; |
| 3267 | VkImportColorBufferGOOGLE importCbInfo = { |
| 3268 | VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE, |
| 3269 | 0, |
| 3270 | }; |
| 3271 | VkImportBufferGOOGLE importBufferInfo = { |
| 3272 | VK_STRUCTURE_TYPE_IMPORT_BUFFER_GOOGLE, |
| 3273 | 0, |
| 3274 | }; |
| 3275 | // VkImportPhysicalAddressGOOGLE importPhysAddrInfo = { |
| 3276 | // VK_STRUCTURE_TYPE_IMPORT_PHYSICAL_ADDRESS_GOOGLE, 0, |
| 3277 | // }; |
| 3278 | |
| 3279 | const VkExportMemoryAllocateInfo* exportAllocateInfoPtr = |
| 3280 | vk_find_struct<VkExportMemoryAllocateInfo>(pAllocateInfo); |
Lingfeng Yang | a4b97ac | 2019-02-19 15:50:26 -0800 | [diff] [blame] | 3281 | |
Kaiyi Li | 6a76b33 | 2022-08-23 08:10:59 -0700 | [diff] [blame] | 3282 | #ifdef VK_USE_PLATFORM_ANDROID_KHR |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3283 | const VkImportAndroidHardwareBufferInfoANDROID* importAhbInfoPtr = |
| 3284 | vk_find_struct<VkImportAndroidHardwareBufferInfoANDROID>(pAllocateInfo); |
Kaiyi Li | 6a76b33 | 2022-08-23 08:10:59 -0700 | [diff] [blame] | 3285 | #else |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3286 | const void* importAhbInfoPtr = nullptr; |
Kaiyi Li | 6a76b33 | 2022-08-23 08:10:59 -0700 | [diff] [blame] | 3287 | #endif |
Lingfeng Yang | a4b97ac | 2019-02-19 15:50:26 -0800 | [diff] [blame] | 3288 | |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 3289 | #if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR) |
| 3290 | const VkImportMemoryFdInfoKHR* importFdInfoPtr = |
| 3291 | vk_find_struct<VkImportMemoryFdInfoKHR>(pAllocateInfo); |
| 3292 | #else |
| 3293 | const VkImportMemoryFdInfoKHR* importFdInfoPtr = nullptr; |
| 3294 | #endif |
| 3295 | |
Yilong Li | d5a6c16 | 2022-01-04 01:03:12 -0800 | [diff] [blame] | 3296 | #ifdef VK_USE_PLATFORM_FUCHSIA |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3297 | const VkImportMemoryBufferCollectionFUCHSIA* importBufferCollectionInfoPtr = |
| 3298 | vk_find_struct<VkImportMemoryBufferCollectionFUCHSIA>(pAllocateInfo); |
Yilong Li | e12328f | 2022-01-06 03:32:13 -0800 | [diff] [blame] | 3299 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3300 | const VkImportMemoryZirconHandleInfoFUCHSIA* importVmoInfoPtr = |
| 3301 | vk_find_struct<VkImportMemoryZirconHandleInfoFUCHSIA>(pAllocateInfo); |
Yilong Li | d5a6c16 | 2022-01-04 01:03:12 -0800 | [diff] [blame] | 3302 | #else |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3303 | const void* importBufferCollectionInfoPtr = nullptr; |
| 3304 | const void* importVmoInfoPtr = nullptr; |
Yilong Li | d5a6c16 | 2022-01-04 01:03:12 -0800 | [diff] [blame] | 3305 | #endif // VK_USE_PLATFORM_FUCHSIA |
Lingfeng Yang | a4b97ac | 2019-02-19 15:50:26 -0800 | [diff] [blame] | 3306 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3307 | const VkMemoryDedicatedAllocateInfo* dedicatedAllocInfoPtr = |
| 3308 | vk_find_struct<VkMemoryDedicatedAllocateInfo>(pAllocateInfo); |
Lingfeng Yang | a4b97ac | 2019-02-19 15:50:26 -0800 | [diff] [blame] | 3309 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3310 | // Note for AHardwareBuffers, the Vulkan spec states: |
| 3311 | // |
| 3312 | // Android hardware buffers have intrinsic width, height, format, and usage |
| 3313 | // properties, so Vulkan images bound to memory imported from an Android |
| 3314 | // hardware buffer must use dedicated allocations |
| 3315 | // |
| 3316 | // so any allocation requests with a VkImportAndroidHardwareBufferInfoANDROID |
| 3317 | // will necessarily have a VkMemoryDedicatedAllocateInfo. However, the host |
| 3318 | // may or may not actually use a dedicated allocation to emulate |
| 3319 | // AHardwareBuffers. As such, the VkMemoryDedicatedAllocateInfo is passed to the |
| 3320 | // host and the host will decide whether or not to use it. |
Jason Macnak | b233a51 | 2023-02-23 00:40:01 +0000 | [diff] [blame] | 3321 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3322 | bool shouldPassThroughDedicatedAllocInfo = |
| 3323 | !exportAllocateInfoPtr && !importBufferCollectionInfoPtr && !importVmoInfoPtr; |
Yilong Li | 2753c7a | 2021-02-04 12:58:33 -0800 | [diff] [blame] | 3324 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3325 | const VkPhysicalDeviceMemoryProperties& physicalDeviceMemoryProps = |
| 3326 | getPhysicalDeviceMemoryProperties(context, device, VK_NULL_HANDLE); |
Jason Macnak | 894a293 | 2023-03-15 09:41:46 -0700 | [diff] [blame] | 3327 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3328 | const bool requestedMemoryIsHostVisible = |
| 3329 | isHostVisible(&physicalDeviceMemoryProps, pAllocateInfo->memoryTypeIndex); |
Jason Macnak | 894a293 | 2023-03-15 09:41:46 -0700 | [diff] [blame] | 3330 | |
Yilong Li | 32f5c9e | 2023-03-30 21:37:53 -0700 | [diff] [blame] | 3331 | #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__) |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3332 | shouldPassThroughDedicatedAllocInfo &= !requestedMemoryIsHostVisible; |
Yilong Li | 2753c7a | 2021-02-04 12:58:33 -0800 | [diff] [blame] | 3333 | #endif // VK_USE_PLATFORM_FUCHSIA |
Lingfeng Yang | a4b97ac | 2019-02-19 15:50:26 -0800 | [diff] [blame] | 3334 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3335 | if (shouldPassThroughDedicatedAllocInfo && dedicatedAllocInfoPtr) { |
| 3336 | dedicatedAllocInfo = vk_make_orphan_copy(*dedicatedAllocInfoPtr); |
| 3337 | vk_append_struct(&structChainIter, &dedicatedAllocInfo); |
| 3338 | } |
Lingfeng Yang | a4b97ac | 2019-02-19 15:50:26 -0800 | [diff] [blame] | 3339 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3340 | // State needed for import/export. |
| 3341 | bool exportAhb = false; |
| 3342 | bool exportVmo = false; |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 3343 | bool exportDmabuf = false; |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3344 | bool importAhb = false; |
| 3345 | bool importBufferCollection = false; |
| 3346 | bool importVmo = false; |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 3347 | bool importDmabuf = false; |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3348 | (void)exportVmo; |
Lingfeng Yang | a4b97ac | 2019-02-19 15:50:26 -0800 | [diff] [blame] | 3349 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3350 | // Even if we export allocate, the underlying operation |
| 3351 | // for the host is always going to be an import operation. |
| 3352 | // This is also how Intel's implementation works, |
| 3353 | // and is generally simpler; |
| 3354 | // even in an export allocation, |
| 3355 | // we perform AHardwareBuffer allocation |
| 3356 | // on the guest side, at this layer, |
| 3357 | // and then we attach a new VkDeviceMemory |
| 3358 | // to the AHardwareBuffer on the host via an "import" operation. |
| 3359 | AHardwareBuffer* ahw = nullptr; |
Lingfeng Yang | a4b97ac | 2019-02-19 15:50:26 -0800 | [diff] [blame] | 3360 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3361 | if (exportAllocateInfoPtr) { |
| 3362 | exportAhb = exportAllocateInfoPtr->handleTypes & |
| 3363 | VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID; |
Yilong Li | d5a6c16 | 2022-01-04 01:03:12 -0800 | [diff] [blame] | 3364 | #ifdef VK_USE_PLATFORM_FUCHSIA |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3365 | exportVmo = exportAllocateInfoPtr->handleTypes & |
| 3366 | VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA; |
Yilong Li | d5a6c16 | 2022-01-04 01:03:12 -0800 | [diff] [blame] | 3367 | #endif // VK_USE_PLATFORM_FUCHSIA |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 3368 | exportDmabuf = |
| 3369 | exportAllocateInfoPtr->handleTypes & (VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT | |
| 3370 | VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3371 | } else if (importAhbInfoPtr) { |
| 3372 | importAhb = true; |
| 3373 | } else if (importBufferCollectionInfoPtr) { |
| 3374 | importBufferCollection = true; |
| 3375 | } else if (importVmoInfoPtr) { |
| 3376 | importVmo = true; |
| 3377 | } |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 3378 | |
| 3379 | if (importFdInfoPtr) { |
| 3380 | importDmabuf = |
| 3381 | (importFdInfoPtr->handleType & (VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT | |
| 3382 | VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)); |
| 3383 | } |
| 3384 | bool isImport = importAhb || importBufferCollection || importVmo || importDmabuf; |
Lingfeng Yang | a4b97ac | 2019-02-19 15:50:26 -0800 | [diff] [blame] | 3385 | |
Craig Stout | ed464d7 | 2022-07-13 14:57:38 -0700 | [diff] [blame] | 3386 | #if defined(VK_USE_PLATFORM_ANDROID_KHR) |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3387 | if (exportAhb) { |
Gurchetan Singh | 46aac19 | 2024-06-26 16:31:32 -0700 | [diff] [blame] | 3388 | hasDedicatedImage = |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3389 | dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->image != VK_NULL_HANDLE); |
Gurchetan Singh | 46aac19 | 2024-06-26 16:31:32 -0700 | [diff] [blame] | 3390 | hasDedicatedBuffer = |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3391 | dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE); |
| 3392 | VkExtent3D imageExtent = {0, 0, 0}; |
| 3393 | uint32_t imageLayers = 0; |
| 3394 | VkFormat imageFormat = VK_FORMAT_UNDEFINED; |
| 3395 | VkImageUsageFlags imageUsage = 0; |
| 3396 | VkImageCreateFlags imageCreateFlags = 0; |
| 3397 | VkDeviceSize bufferSize = 0; |
| 3398 | VkDeviceSize allocationInfoAllocSize = finalAllocInfo.allocationSize; |
Lingfeng Yang | a4b97ac | 2019-02-19 15:50:26 -0800 | [diff] [blame] | 3399 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3400 | if (hasDedicatedImage) { |
| 3401 | AutoLock<RecursiveLock> lock(mLock); |
Lingfeng Yang | a4b97ac | 2019-02-19 15:50:26 -0800 | [diff] [blame] | 3402 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3403 | auto it = info_VkImage.find(dedicatedAllocInfoPtr->image); |
| 3404 | if (it == info_VkImage.end()) |
| 3405 | _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED); |
| 3406 | const auto& info = it->second; |
| 3407 | const auto& imgCi = info.createInfo; |
Lingfeng Yang | a4b97ac | 2019-02-19 15:50:26 -0800 | [diff] [blame] | 3408 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3409 | imageExtent = imgCi.extent; |
| 3410 | imageLayers = imgCi.arrayLayers; |
| 3411 | imageFormat = imgCi.format; |
| 3412 | imageUsage = imgCi.usage; |
| 3413 | imageCreateFlags = imgCi.flags; |
Lingfeng Yang | a4b97ac | 2019-02-19 15:50:26 -0800 | [diff] [blame] | 3414 | } |
| 3415 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3416 | if (hasDedicatedBuffer) { |
| 3417 | AutoLock<RecursiveLock> lock(mLock); |
| 3418 | |
| 3419 | auto it = info_VkBuffer.find(dedicatedAllocInfoPtr->buffer); |
| 3420 | if (it == info_VkBuffer.end()) |
| 3421 | _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED); |
| 3422 | const auto& info = it->second; |
| 3423 | const auto& bufCi = info.createInfo; |
| 3424 | |
| 3425 | bufferSize = bufCi.size; |
Lingfeng Yang | a4b97ac | 2019-02-19 15:50:26 -0800 | [diff] [blame] | 3426 | } |
| 3427 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3428 | VkResult ahbCreateRes = createAndroidHardwareBuffer( |
| 3429 | ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper(), |
| 3430 | hasDedicatedImage, hasDedicatedBuffer, imageExtent, imageLayers, imageFormat, |
| 3431 | imageUsage, imageCreateFlags, bufferSize, allocationInfoAllocSize, &ahw); |
Jason Macnak | 485e4be9 | 2022-06-14 16:33:46 -0700 | [diff] [blame] | 3432 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3433 | if (ahbCreateRes != VK_SUCCESS) { |
| 3434 | _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(ahbCreateRes); |
Lingfeng Yang | a4b97ac | 2019-02-19 15:50:26 -0800 | [diff] [blame] | 3435 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3436 | } |
| 3437 | |
| 3438 | if (importAhb) { |
| 3439 | ahw = importAhbInfoPtr->buffer; |
| 3440 | // We still need to acquire the AHardwareBuffer. |
| 3441 | importAndroidHardwareBuffer( |
| 3442 | ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper(), |
| 3443 | importAhbInfoPtr, nullptr); |
| 3444 | } |
| 3445 | |
| 3446 | if (ahw) { |
| 3447 | auto* gralloc = |
| 3448 | ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper(); |
| 3449 | |
| 3450 | const uint32_t hostHandle = gralloc->getHostHandle(ahw); |
Yahan Zhou | 79ab57a | 2023-09-14 16:24:26 -0700 | [diff] [blame] | 3451 | if (gralloc->getFormat(ahw) == AHARDWAREBUFFER_FORMAT_BLOB && |
| 3452 | !gralloc->treatBlobAsImage()) { |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3453 | importBufferInfo.buffer = hostHandle; |
| 3454 | vk_append_struct(&structChainIter, &importBufferInfo); |
| 3455 | } else { |
| 3456 | importCbInfo.colorBuffer = hostHandle; |
| 3457 | vk_append_struct(&structChainIter, &importCbInfo); |
| 3458 | } |
| 3459 | } |
Craig Stout | ed464d7 | 2022-07-13 14:57:38 -0700 | [diff] [blame] | 3460 | #endif |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3461 | zx_handle_t vmo_handle = ZX_HANDLE_INVALID; |
David Reveman | df8d964 | 2019-04-24 12:13:36 -0400 | [diff] [blame] | 3462 | |
David Reveman | d5d92d6 | 2019-03-29 15:19:25 -0400 | [diff] [blame] | 3463 | #ifdef VK_USE_PLATFORM_FUCHSIA |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3464 | if (importBufferCollection) { |
| 3465 | const auto& collection = |
| 3466 | *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>( |
David Reveman | d5d92d6 | 2019-03-29 15:19:25 -0400 | [diff] [blame] | 3467 | importBufferCollectionInfoPtr->collection); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3468 | auto result = collection->WaitForBuffersAllocated(); |
| 3469 | if (!result.ok() || result->status != ZX_OK) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3470 | mesa_loge("WaitForBuffersAllocated failed: %d %d", result.status(), |
| 3471 | GET_STATUS_SAFE(result, status)); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3472 | _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED); |
| 3473 | } |
| 3474 | fuchsia_sysmem::wire::BufferCollectionInfo2& info = result->buffer_collection_info; |
| 3475 | uint32_t index = importBufferCollectionInfoPtr->index; |
| 3476 | if (info.buffer_count < index) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3477 | mesa_loge("Invalid buffer index: %d %d", index); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3478 | _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED); |
| 3479 | } |
| 3480 | vmo_handle = info.buffers[index].vmo.release(); |
| 3481 | } |
| 3482 | |
| 3483 | if (importVmo) { |
| 3484 | vmo_handle = importVmoInfoPtr->handle; |
| 3485 | } |
| 3486 | |
| 3487 | if (exportVmo) { |
Gurchetan Singh | 46aac19 | 2024-06-26 16:31:32 -0700 | [diff] [blame] | 3488 | hasDedicatedImage = |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3489 | dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->image != VK_NULL_HANDLE); |
Gurchetan Singh | 46aac19 | 2024-06-26 16:31:32 -0700 | [diff] [blame] | 3490 | hasDedicatedBuffer = |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3491 | dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE); |
| 3492 | |
| 3493 | if (hasDedicatedImage && hasDedicatedBuffer) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3494 | mesa_loge( |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3495 | "Invalid VkMemoryDedicatedAllocationInfo: At least one " |
| 3496 | "of image and buffer must be VK_NULL_HANDLE."); |
| 3497 | return VK_ERROR_OUT_OF_DEVICE_MEMORY; |
David Reveman | d5d92d6 | 2019-03-29 15:19:25 -0400 | [diff] [blame] | 3498 | } |
| 3499 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3500 | const VkImageCreateInfo* pImageCreateInfo = nullptr; |
| 3501 | |
| 3502 | VkBufferConstraintsInfoFUCHSIA bufferConstraintsInfo = { |
| 3503 | .sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CREATE_INFO_FUCHSIA, |
| 3504 | .pNext = nullptr, |
| 3505 | .createInfo = {}, |
| 3506 | .requiredFormatFeatures = 0, |
| 3507 | .bufferCollectionConstraints = |
| 3508 | VkBufferCollectionConstraintsInfoFUCHSIA{ |
| 3509 | .sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CONSTRAINTS_INFO_FUCHSIA, |
| 3510 | .pNext = nullptr, |
| 3511 | .minBufferCount = 1, |
| 3512 | .maxBufferCount = 0, |
| 3513 | .minBufferCountForCamping = 0, |
| 3514 | .minBufferCountForDedicatedSlack = 0, |
| 3515 | .minBufferCountForSharedSlack = 0, |
| 3516 | }, |
| 3517 | }; |
| 3518 | const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo = nullptr; |
| 3519 | |
| 3520 | if (hasDedicatedImage) { |
| 3521 | AutoLock<RecursiveLock> lock(mLock); |
| 3522 | |
| 3523 | auto it = info_VkImage.find(dedicatedAllocInfoPtr->image); |
| 3524 | if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED; |
| 3525 | const auto& imageInfo = it->second; |
| 3526 | |
| 3527 | pImageCreateInfo = &imageInfo.createInfo; |
David Reveman | d5d92d6 | 2019-03-29 15:19:25 -0400 | [diff] [blame] | 3528 | } |
| 3529 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3530 | if (hasDedicatedBuffer) { |
| 3531 | AutoLock<RecursiveLock> lock(mLock); |
David Reveman | 5b7c584 | 2019-02-20 01:06:48 -0500 | [diff] [blame] | 3532 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3533 | auto it = info_VkBuffer.find(dedicatedAllocInfoPtr->buffer); |
| 3534 | if (it == info_VkBuffer.end()) return VK_ERROR_INITIALIZATION_FAILED; |
| 3535 | const auto& bufferInfo = it->second; |
| 3536 | |
| 3537 | bufferConstraintsInfo.createInfo = bufferInfo.createInfo; |
| 3538 | pBufferConstraintsInfo = &bufferConstraintsInfo; |
| 3539 | } |
| 3540 | |
| 3541 | hasDedicatedImage = |
| 3542 | hasDedicatedImage && getBufferCollectionConstraintsVulkanImageUsage(pImageCreateInfo); |
| 3543 | hasDedicatedBuffer = hasDedicatedBuffer && getBufferCollectionConstraintsVulkanBufferUsage( |
| 3544 | pBufferConstraintsInfo); |
| 3545 | |
| 3546 | if (hasDedicatedImage || hasDedicatedBuffer) { |
| 3547 | auto token_ends = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollectionToken>(); |
| 3548 | if (!token_ends.is_ok()) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3549 | mesa_loge("zx_channel_create failed: %d", token_ends.status_value()); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3550 | abort(); |
Yilong Li | e1fddb5 | 2020-07-10 17:56:43 -0700 | [diff] [blame] | 3551 | } |
| 3552 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3553 | { |
| 3554 | auto result = |
| 3555 | mSysmemAllocator->AllocateSharedCollection(std::move(token_ends->server)); |
| 3556 | if (!result.ok()) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3557 | mesa_loge("AllocateSharedCollection failed: %d", result.status()); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3558 | abort(); |
| 3559 | } |
| 3560 | } |
Yilong Li | e1fddb5 | 2020-07-10 17:56:43 -0700 | [diff] [blame] | 3561 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3562 | auto collection_ends = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollection>(); |
| 3563 | if (!collection_ends.is_ok()) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3564 | mesa_loge("zx_channel_create failed: %d", collection_ends.status_value()); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3565 | abort(); |
| 3566 | } |
Yilong Li | e1fddb5 | 2020-07-10 17:56:43 -0700 | [diff] [blame] | 3567 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3568 | { |
| 3569 | auto result = mSysmemAllocator->BindSharedCollection( |
| 3570 | std::move(token_ends->client), std::move(collection_ends->server)); |
| 3571 | if (!result.ok()) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3572 | mesa_loge("BindSharedCollection failed: %d", result.status()); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3573 | abort(); |
| 3574 | } |
| 3575 | } |
| 3576 | |
| 3577 | fidl::WireSyncClient<fuchsia_sysmem::BufferCollection> collection( |
| 3578 | std::move(collection_ends->client)); |
David Reveman | df8d964 | 2019-04-24 12:13:36 -0400 | [diff] [blame] | 3579 | if (hasDedicatedImage) { |
Mitchell Kember | d9f3e4a | 2024-01-29 16:53:33 -0800 | [diff] [blame] | 3580 | // TODO(fxbug.dev/42172354): Use setBufferCollectionImageConstraintsFUCHSIA. |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3581 | VkResult res = setBufferCollectionConstraintsFUCHSIA(enc, device, &collection, |
| 3582 | pImageCreateInfo); |
| 3583 | if (res == VK_ERROR_FORMAT_NOT_SUPPORTED) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3584 | mesa_loge("setBufferCollectionConstraints failed: format %u is not supported", |
| 3585 | pImageCreateInfo->format); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3586 | return VK_ERROR_OUT_OF_DEVICE_MEMORY; |
| 3587 | } |
| 3588 | if (res != VK_SUCCESS) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3589 | mesa_loge("setBufferCollectionConstraints failed: %d", res); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3590 | abort(); |
| 3591 | } |
David Reveman | df8d964 | 2019-04-24 12:13:36 -0400 | [diff] [blame] | 3592 | } |
| 3593 | |
Yilong Li | e1fddb5 | 2020-07-10 17:56:43 -0700 | [diff] [blame] | 3594 | if (hasDedicatedBuffer) { |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3595 | VkResult res = setBufferCollectionBufferConstraintsFUCHSIA(&collection, |
| 3596 | pBufferConstraintsInfo); |
| 3597 | if (res != VK_SUCCESS) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3598 | mesa_loge("setBufferCollectionBufferConstraints failed: %d", res); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3599 | abort(); |
| 3600 | } |
Yilong Li | e1fddb5 | 2020-07-10 17:56:43 -0700 | [diff] [blame] | 3601 | } |
| 3602 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3603 | { |
| 3604 | auto result = collection->WaitForBuffersAllocated(); |
| 3605 | if (result.ok() && result->status == ZX_OK) { |
| 3606 | fuchsia_sysmem::wire::BufferCollectionInfo2& info = |
| 3607 | result->buffer_collection_info; |
| 3608 | if (!info.buffer_count) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3609 | mesa_loge( |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3610 | "WaitForBuffersAllocated returned " |
| 3611 | "invalid count: %d", |
| 3612 | info.buffer_count); |
| 3613 | abort(); |
| 3614 | } |
| 3615 | vmo_handle = info.buffers[0].vmo.release(); |
| 3616 | } else { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3617 | mesa_loge("WaitForBuffersAllocated failed: %d %d", result.status(), |
| 3618 | GET_STATUS_SAFE(result, status)); |
David Reveman | df8d964 | 2019-04-24 12:13:36 -0400 | [diff] [blame] | 3619 | abort(); |
| 3620 | } |
David Reveman | 5b7c584 | 2019-02-20 01:06:48 -0500 | [diff] [blame] | 3621 | } |
David Reveman | 5b7c584 | 2019-02-20 01:06:48 -0500 | [diff] [blame] | 3622 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3623 | collection->Close(); |
| 3624 | |
David Reveman | aa65ad6 | 2019-05-02 08:23:46 -0400 | [diff] [blame] | 3625 | zx::vmo vmo_copy; |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3626 | zx_status_t status = zx_handle_duplicate(vmo_handle, ZX_RIGHT_SAME_RIGHTS, |
David Reveman | aa65ad6 | 2019-05-02 08:23:46 -0400 | [diff] [blame] | 3627 | vmo_copy.reset_and_get_address()); |
David Reveman | df8d964 | 2019-04-24 12:13:36 -0400 | [diff] [blame] | 3628 | if (status != ZX_OK) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3629 | mesa_loge("Failed to duplicate VMO: %d", status); |
David Reveman | df8d964 | 2019-04-24 12:13:36 -0400 | [diff] [blame] | 3630 | abort(); |
| 3631 | } |
Yilong Li | f8eda75 | 2020-07-01 19:19:45 -0700 | [diff] [blame] | 3632 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3633 | if (pImageCreateInfo) { |
| 3634 | // Only device-local images need to create color buffer; for |
| 3635 | // host-visible images, the color buffer is already created |
| 3636 | // when sysmem allocates memory. Here we use the |tiling| |
| 3637 | // field of image creation info to determine if it uses |
| 3638 | // host-visible memory. |
| 3639 | bool isLinear = pImageCreateInfo->tiling == VK_IMAGE_TILING_LINEAR; |
| 3640 | if (!isLinear) { |
| 3641 | fuchsia_hardware_goldfish::wire::ColorBufferFormatType format; |
| 3642 | switch (pImageCreateInfo->format) { |
| 3643 | case VK_FORMAT_B8G8R8A8_SINT: |
| 3644 | case VK_FORMAT_B8G8R8A8_UNORM: |
| 3645 | case VK_FORMAT_B8G8R8A8_SRGB: |
| 3646 | case VK_FORMAT_B8G8R8A8_SNORM: |
| 3647 | case VK_FORMAT_B8G8R8A8_SSCALED: |
| 3648 | case VK_FORMAT_B8G8R8A8_USCALED: |
| 3649 | format = fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kBgra; |
| 3650 | break; |
| 3651 | case VK_FORMAT_R8G8B8A8_SINT: |
| 3652 | case VK_FORMAT_R8G8B8A8_UNORM: |
| 3653 | case VK_FORMAT_R8G8B8A8_SRGB: |
| 3654 | case VK_FORMAT_R8G8B8A8_SNORM: |
| 3655 | case VK_FORMAT_R8G8B8A8_SSCALED: |
| 3656 | case VK_FORMAT_R8G8B8A8_USCALED: |
| 3657 | format = fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kRgba; |
| 3658 | break; |
| 3659 | case VK_FORMAT_R8_UNORM: |
| 3660 | case VK_FORMAT_R8_UINT: |
| 3661 | case VK_FORMAT_R8_USCALED: |
| 3662 | case VK_FORMAT_R8_SNORM: |
| 3663 | case VK_FORMAT_R8_SINT: |
| 3664 | case VK_FORMAT_R8_SSCALED: |
| 3665 | case VK_FORMAT_R8_SRGB: |
| 3666 | format = |
| 3667 | fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kLuminance; |
| 3668 | break; |
| 3669 | case VK_FORMAT_R8G8_UNORM: |
| 3670 | case VK_FORMAT_R8G8_UINT: |
| 3671 | case VK_FORMAT_R8G8_USCALED: |
| 3672 | case VK_FORMAT_R8G8_SNORM: |
| 3673 | case VK_FORMAT_R8G8_SINT: |
| 3674 | case VK_FORMAT_R8G8_SSCALED: |
| 3675 | case VK_FORMAT_R8G8_SRGB: |
| 3676 | format = fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kRg; |
| 3677 | break; |
| 3678 | default: |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3679 | mesa_loge("Unsupported format: %d", pImageCreateInfo->format); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3680 | abort(); |
| 3681 | } |
Yilong Li | 6dc9b5b | 2020-07-17 21:05:22 -0700 | [diff] [blame] | 3682 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3683 | fidl::Arena arena; |
| 3684 | fuchsia_hardware_goldfish::wire::CreateColorBuffer2Params createParams(arena); |
| 3685 | createParams.set_width(pImageCreateInfo->extent.width) |
| 3686 | .set_height(pImageCreateInfo->extent.height) |
| 3687 | .set_format(format) |
| 3688 | .set_memory_property( |
| 3689 | fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal); |
| 3690 | |
| 3691 | auto result = mControlDevice->CreateColorBuffer2(std::move(vmo_copy), |
| 3692 | std::move(createParams)); |
| 3693 | if (!result.ok() || result->res != ZX_OK) { |
| 3694 | if (result.ok() && result->res == ZX_ERR_ALREADY_EXISTS) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3695 | mesa_logd( |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3696 | "CreateColorBuffer: color buffer already " |
| 3697 | "exists\n"); |
| 3698 | } else { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3699 | mesa_loge("CreateColorBuffer failed: %d:%d", result.status(), |
| 3700 | GET_STATUS_SAFE(result, res)); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3701 | abort(); |
| 3702 | } |
| 3703 | } |
Yilong Li | 6dc9b5b | 2020-07-17 21:05:22 -0700 | [diff] [blame] | 3704 | } |
Yilong Li | f8eda75 | 2020-07-01 19:19:45 -0700 | [diff] [blame] | 3705 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3706 | |
| 3707 | if (pBufferConstraintsInfo) { |
| 3708 | fidl::Arena arena; |
| 3709 | fuchsia_hardware_goldfish::wire::CreateBuffer2Params createParams(arena); |
| 3710 | createParams.set_size(arena, pBufferConstraintsInfo->createInfo.size) |
| 3711 | .set_memory_property( |
| 3712 | fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal); |
| 3713 | |
| 3714 | auto result = |
| 3715 | mControlDevice->CreateBuffer2(std::move(vmo_copy), std::move(createParams)); |
| 3716 | if (!result.ok() || result->is_error()) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3717 | mesa_loge("CreateBuffer2 failed: %d:%d", result.status(), |
| 3718 | GET_STATUS_SAFE(result, error_value())); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3719 | abort(); |
| 3720 | } |
| 3721 | } |
| 3722 | } else { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3723 | mesa_logw( |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3724 | "Dedicated image / buffer not available. Cannot create " |
| 3725 | "BufferCollection to export VMOs."); |
| 3726 | return VK_ERROR_OUT_OF_DEVICE_MEMORY; |
David Reveman | df8d964 | 2019-04-24 12:13:36 -0400 | [diff] [blame] | 3727 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3728 | } |
| 3729 | |
| 3730 | if (vmo_handle != ZX_HANDLE_INVALID) { |
| 3731 | zx::vmo vmo_copy; |
| 3732 | zx_status_t status = |
| 3733 | zx_handle_duplicate(vmo_handle, ZX_RIGHT_SAME_RIGHTS, vmo_copy.reset_and_get_address()); |
| 3734 | if (status != ZX_OK) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3735 | mesa_loge("Failed to duplicate VMO: %d", status); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3736 | abort(); |
| 3737 | } |
| 3738 | zx_status_t status2 = ZX_OK; |
| 3739 | |
| 3740 | auto result = mControlDevice->GetBufferHandle(std::move(vmo_copy)); |
| 3741 | if (!result.ok() || result->res != ZX_OK) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3742 | mesa_loge("GetBufferHandle failed: %d:%d", result.status(), |
| 3743 | GET_STATUS_SAFE(result, res)); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3744 | } else { |
| 3745 | fuchsia_hardware_goldfish::wire::BufferHandleType handle_type = result->type; |
| 3746 | uint32_t buffer_handle = result->id; |
| 3747 | |
| 3748 | if (handle_type == fuchsia_hardware_goldfish::wire::BufferHandleType::kBuffer) { |
| 3749 | importBufferInfo.buffer = buffer_handle; |
| 3750 | vk_append_struct(&structChainIter, &importBufferInfo); |
| 3751 | } else { |
| 3752 | importCbInfo.colorBuffer = buffer_handle; |
| 3753 | vk_append_struct(&structChainIter, &importCbInfo); |
| 3754 | } |
| 3755 | } |
| 3756 | } |
David Reveman | df8d964 | 2019-04-24 12:13:36 -0400 | [diff] [blame] | 3757 | #endif |
| 3758 | |
Jason Macnak | 6d3d7b2 | 2024-04-01 16:48:53 -0700 | [diff] [blame] | 3759 | VirtGpuResourcePtr colorBufferBlob = nullptr; |
Aaron Ruby | 42a8b75 | 2024-05-27 15:37:53 -0400 | [diff] [blame] | 3760 | #if defined(LINUX_GUEST_BUILD) |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 3761 | if (exportDmabuf) { |
| 3762 | VirtGpuDevice* instance = VirtGpuDevice::getInstance(); |
Gurchetan Singh | 46aac19 | 2024-06-26 16:31:32 -0700 | [diff] [blame] | 3763 | hasDedicatedImage = |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 3764 | dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->image != VK_NULL_HANDLE); |
Gurchetan Singh | 46aac19 | 2024-06-26 16:31:32 -0700 | [diff] [blame] | 3765 | hasDedicatedBuffer = |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 3766 | dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE); |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 3767 | |
| 3768 | if (hasDedicatedImage) { |
| 3769 | VkImageCreateInfo imageCreateInfo; |
Aaron Ruby | 42a8b75 | 2024-05-27 15:37:53 -0400 | [diff] [blame] | 3770 | bool isDmaBufImage = false; |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 3771 | { |
| 3772 | AutoLock<RecursiveLock> lock(mLock); |
| 3773 | |
| 3774 | auto it = info_VkImage.find(dedicatedAllocInfoPtr->image); |
| 3775 | if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED; |
| 3776 | const auto& imageInfo = it->second; |
| 3777 | |
| 3778 | imageCreateInfo = imageInfo.createInfo; |
Aaron Ruby | 42a8b75 | 2024-05-27 15:37:53 -0400 | [diff] [blame] | 3779 | isDmaBufImage = imageInfo.isDmaBufImage; |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 3780 | } |
Aaron Ruby | 379a5ed | 2024-04-23 14:57:55 -0400 | [diff] [blame] | 3781 | |
Aaron Ruby | 42a8b75 | 2024-05-27 15:37:53 -0400 | [diff] [blame] | 3782 | // TODO (b/326956485): Support DRM format modifiers for dmabuf memory |
| 3783 | // For now, can only externalize memory for linear images |
| 3784 | if (isDmaBufImage) { |
| 3785 | const VkImageSubresource imageSubresource = { |
| 3786 | .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, |
| 3787 | .mipLevel = 0, |
| 3788 | .arrayLayer = 0, |
| 3789 | }; |
| 3790 | VkSubresourceLayout subResourceLayout; |
Aaron Ruby | a8140f4 | 2024-05-10 15:50:44 -0400 | [diff] [blame] | 3791 | on_vkGetImageSubresourceLayout(context, device, dedicatedAllocInfoPtr->image, |
| 3792 | &imageSubresource, &subResourceLayout); |
Aaron Ruby | 42a8b75 | 2024-05-27 15:37:53 -0400 | [diff] [blame] | 3793 | if (!subResourceLayout.rowPitch) { |
Gurchetan Singh | 32cf849 | 2024-06-14 23:14:36 -0700 | [diff] [blame] | 3794 | mesa_loge("Failed to query stride for VirtGpu resource creation."); |
Aaron Ruby | 42a8b75 | 2024-05-27 15:37:53 -0400 | [diff] [blame] | 3795 | return VK_ERROR_INITIALIZATION_FAILED; |
| 3796 | } |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 3797 | |
Aaron Ruby | 42a8b75 | 2024-05-27 15:37:53 -0400 | [diff] [blame] | 3798 | uint32_t virglFormat = gfxstream::vk::getVirglFormat(imageCreateInfo.format); |
| 3799 | if (!virglFormat) { |
| 3800 | mesa_loge("Unsupported VK format for VirtGpu resource, vkFormat: 0x%x", |
| 3801 | imageCreateInfo.format); |
| 3802 | return VK_ERROR_FORMAT_NOT_SUPPORTED; |
| 3803 | } |
| 3804 | const uint32_t target = PIPE_TEXTURE_2D; |
| 3805 | uint32_t bind = VIRGL_BIND_RENDER_TARGET; |
| 3806 | if (VK_IMAGE_TILING_LINEAR == imageCreateInfo.tiling) { |
| 3807 | bind |= VIRGL_BIND_LINEAR; |
| 3808 | } |
| 3809 | colorBufferBlob = instance->createResource( |
| 3810 | imageCreateInfo.extent.width, imageCreateInfo.extent.height, |
| 3811 | subResourceLayout.rowPitch, virglFormat, target, bind); |
| 3812 | if (!colorBufferBlob) { |
| 3813 | mesa_loge("Failed to create colorBuffer resource for Image memory"); |
| 3814 | return VK_ERROR_OUT_OF_DEVICE_MEMORY; |
| 3815 | } |
| 3816 | if (!colorBufferBlob->wait()) { |
| 3817 | mesa_loge("Failed to wait for colorBuffer resource for Image memory"); |
| 3818 | return VK_ERROR_OUT_OF_DEVICE_MEMORY; |
| 3819 | } |
| 3820 | } else { |
| 3821 | mesa_logw( |
| 3822 | "The VkMemoryDedicatedAllocateInfo::image associated with VkDeviceMemory " |
| 3823 | "allocation cannot be used to create exportable resource " |
| 3824 | "(VkExportMemoryAllocateInfo).\n"); |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 3825 | } |
Aaron Ruby | 42a8b75 | 2024-05-27 15:37:53 -0400 | [diff] [blame] | 3826 | } else if (hasDedicatedBuffer) { |
| 3827 | mesa_logw( |
| 3828 | "VkDeviceMemory allocated with VkMemoryDedicatedAllocateInfo::buffer cannot be " |
| 3829 | "exported (VkExportMemoryAllocateInfo)"); |
| 3830 | } else { |
| 3831 | mesa_logw( |
| 3832 | "VkDeviceMemory is not exportable (VkExportMemoryAllocateInfo). Requires " |
| 3833 | "VkMemoryDedicatedAllocateInfo::image to create external resource."); |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 3834 | } |
| 3835 | } |
| 3836 | |
| 3837 | if (importDmabuf) { |
| 3838 | VirtGpuExternalHandle importHandle = {}; |
| 3839 | importHandle.osHandle = importFdInfoPtr->fd; |
| 3840 | importHandle.type = kMemHandleDmabuf; |
| 3841 | |
| 3842 | auto instance = VirtGpuDevice::getInstance(); |
| 3843 | colorBufferBlob = instance->importBlob(importHandle); |
| 3844 | if (!colorBufferBlob) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3845 | mesa_loge("%s: Failed to import colorBuffer resource\n", __func__); |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 3846 | return VK_ERROR_OUT_OF_DEVICE_MEMORY; |
| 3847 | } |
| 3848 | } |
| 3849 | |
| 3850 | if (colorBufferBlob) { |
| 3851 | importCbInfo.colorBuffer = colorBufferBlob->getResourceHandle(); |
| 3852 | vk_append_struct(&structChainIter, &importCbInfo); |
| 3853 | } |
| 3854 | #endif |
| 3855 | |
| 3856 | if (ahw || colorBufferBlob || !requestedMemoryIsHostVisible) { |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3857 | input_result = |
| 3858 | enc->vkAllocateMemory(device, &finalAllocInfo, pAllocator, pMemory, true /* do lock */); |
Lingfeng Yang | 35e9c6a | 2018-12-25 17:13:36 -0800 | [diff] [blame] | 3859 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3860 | if (input_result != VK_SUCCESS) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(input_result); |
Lingfeng Yang | 35e9c6a | 2018-12-25 17:13:36 -0800 | [diff] [blame] | 3861 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3862 | VkDeviceSize allocationSize = finalAllocInfo.allocationSize; |
| 3863 | setDeviceMemoryInfo(device, *pMemory, 0, nullptr, finalAllocInfo.memoryTypeIndex, ahw, |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 3864 | isImport, vmo_handle, colorBufferBlob); |
Lingfeng Yang | 35e9c6a | 2018-12-25 17:13:36 -0800 | [diff] [blame] | 3865 | |
Yahan Zhou | 285f857 | 2021-06-09 18:35:04 -0700 | [diff] [blame] | 3866 | _RETURN_SCUCCESS_WITH_DEVICE_MEMORY_REPORT; |
Lingfeng Yang | 131d5a4 | 2018-11-30 12:00:33 -0800 | [diff] [blame] | 3867 | } |
| 3868 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3869 | #ifdef VK_USE_PLATFORM_FUCHSIA |
| 3870 | if (vmo_handle != ZX_HANDLE_INVALID) { |
| 3871 | input_result = |
| 3872 | enc->vkAllocateMemory(device, &finalAllocInfo, pAllocator, pMemory, true /* do lock */); |
Shalini S | db704c9 | 2023-01-27 21:35:33 +0000 | [diff] [blame] | 3873 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3874 | // Get VMO handle rights, and only use allowed rights to map the |
| 3875 | // host memory. |
| 3876 | zx_info_handle_basic handle_info; |
| 3877 | zx_status_t status = zx_object_get_info(vmo_handle, ZX_INFO_HANDLE_BASIC, &handle_info, |
| 3878 | sizeof(handle_info), nullptr, nullptr); |
| 3879 | if (status != ZX_OK) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3880 | mesa_loge("%s: cannot get vmo object info: vmo = %u status: %d.", __func__, vmo_handle, |
| 3881 | status); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3882 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
Shalini S | db704c9 | 2023-01-27 21:35:33 +0000 | [diff] [blame] | 3883 | } |
| 3884 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3885 | zx_vm_option_t vm_permission = 0u; |
| 3886 | vm_permission |= (handle_info.rights & ZX_RIGHT_READ) ? ZX_VM_PERM_READ : 0; |
| 3887 | vm_permission |= (handle_info.rights & ZX_RIGHT_WRITE) ? ZX_VM_PERM_WRITE : 0; |
| 3888 | |
| 3889 | zx_paddr_t addr; |
| 3890 | status = zx_vmar_map(zx_vmar_root_self(), vm_permission, 0, vmo_handle, 0, |
| 3891 | finalAllocInfo.allocationSize, &addr); |
| 3892 | if (status != ZX_OK) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3893 | mesa_loge("%s: cannot map vmar: status %d.", __func__, status); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3894 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
| 3895 | } |
| 3896 | |
| 3897 | setDeviceMemoryInfo(device, *pMemory, finalAllocInfo.allocationSize, |
| 3898 | reinterpret_cast<uint8_t*>(addr), finalAllocInfo.memoryTypeIndex, |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 3899 | /*ahw=*/nullptr, isImport, vmo_handle, /*blobPtr=*/nullptr); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3900 | return VK_SUCCESS; |
Shalini S | db704c9 | 2023-01-27 21:35:33 +0000 | [diff] [blame] | 3901 | } |
Kaiyi Li | 6a76b33 | 2022-08-23 08:10:59 -0700 | [diff] [blame] | 3902 | #endif |
Shalini S | db704c9 | 2023-01-27 21:35:33 +0000 | [diff] [blame] | 3903 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3904 | // Host visible memory with direct mapping |
| 3905 | VkResult result = getCoherentMemory(&finalAllocInfo, enc, device, pMemory); |
| 3906 | if (result != VK_SUCCESS) return result; |
| 3907 | |
| 3908 | _RETURN_SCUCCESS_WITH_DEVICE_MEMORY_REPORT; |
| 3909 | } |
| 3910 | |
| 3911 | void ResourceTracker::on_vkFreeMemory(void* context, VkDevice device, VkDeviceMemory memory, |
| 3912 | const VkAllocationCallbacks* pAllocateInfo) { |
| 3913 | AutoLock<RecursiveLock> lock(mLock); |
| 3914 | |
| 3915 | auto it = info_VkDeviceMemory.find(memory); |
| 3916 | if (it == info_VkDeviceMemory.end()) return; |
| 3917 | auto& info = it->second; |
| 3918 | uint64_t memoryObjectId = (uint64_t)(void*)memory; |
| 3919 | #ifdef VK_USE_PLATFORM_ANDROID_KHR |
| 3920 | if (info.ahw) { |
| 3921 | memoryObjectId = getAHardwareBufferId(info.ahw); |
| 3922 | } |
| 3923 | #endif |
| 3924 | |
| 3925 | emitDeviceMemoryReport(info_VkDevice[device], |
| 3926 | info.imported ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT |
| 3927 | : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT, |
| 3928 | memoryObjectId, 0 /* size */, VK_OBJECT_TYPE_DEVICE_MEMORY, |
| 3929 | (uint64_t)(void*)memory); |
Lingfeng Yang | 35e9c6a | 2018-12-25 17:13:36 -0800 | [diff] [blame] | 3930 | |
Yilong Li | 1ce350b | 2020-09-12 18:55:55 -0700 | [diff] [blame] | 3931 | #ifdef VK_USE_PLATFORM_FUCHSIA |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3932 | if (info.vmoHandle && info.ptr) { |
| 3933 | zx_status_t status = zx_vmar_unmap( |
| 3934 | zx_vmar_root_self(), reinterpret_cast<zx_paddr_t>(info.ptr), info.allocationSize); |
| 3935 | if (status != ZX_OK) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3936 | mesa_loge("%s: Cannot unmap ptr: status %d", status); |
Yilong Li | 1ce350b | 2020-09-12 18:55:55 -0700 | [diff] [blame] | 3937 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3938 | info.ptr = nullptr; |
| 3939 | } |
Yilong Li | 1ce350b | 2020-09-12 18:55:55 -0700 | [diff] [blame] | 3940 | #endif |
| 3941 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3942 | if (!info.coherentMemory) { |
Shalini S | db704c9 | 2023-01-27 21:35:33 +0000 | [diff] [blame] | 3943 | lock.unlock(); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3944 | VkEncoder* enc = (VkEncoder*)context; |
| 3945 | enc->vkFreeMemory(device, memory, pAllocateInfo, true /* do lock */); |
| 3946 | return; |
Lingfeng Yang | e997952 | 2018-12-25 14:44:52 -0800 | [diff] [blame] | 3947 | } |
| 3948 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3949 | auto coherentMemory = freeCoherentMemoryLocked(memory, info); |
Lingfeng Yang | def88ba | 2018-12-13 12:43:17 -0800 | [diff] [blame] | 3950 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3951 | // We have to release the lock before we could possibly free a |
| 3952 | // CoherentMemory, because that will call into VkEncoder, which |
| 3953 | // shouldn't be called when the lock is held. |
| 3954 | lock.unlock(); |
| 3955 | coherentMemory = nullptr; |
| 3956 | } |
Lingfeng Yang | def88ba | 2018-12-13 12:43:17 -0800 | [diff] [blame] | 3957 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3958 | VkResult ResourceTracker::on_vkMapMemory(void* context, VkResult host_result, VkDevice device, |
| 3959 | VkDeviceMemory memory, VkDeviceSize offset, |
| 3960 | VkDeviceSize size, VkMemoryMapFlags, void** ppData) { |
| 3961 | if (host_result != VK_SUCCESS) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3962 | mesa_loge("%s: Host failed to map", __func__); |
Lingfeng Yang | def88ba | 2018-12-13 12:43:17 -0800 | [diff] [blame] | 3963 | return host_result; |
| 3964 | } |
| 3965 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3966 | AutoLock<RecursiveLock> lock(mLock); |
| 3967 | |
Jason Macnak | 24e20f8 | 2024-04-26 15:54:09 -0700 | [diff] [blame] | 3968 | auto deviceMemoryInfoIt = info_VkDeviceMemory.find(memory); |
| 3969 | if (deviceMemoryInfoIt == info_VkDeviceMemory.end()) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3970 | mesa_loge("%s: Failed to find VkDeviceMemory.", __func__); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3971 | return VK_ERROR_MEMORY_MAP_FAILED; |
Lingfeng Yang | def88ba | 2018-12-13 12:43:17 -0800 | [diff] [blame] | 3972 | } |
Jason Macnak | 24e20f8 | 2024-04-26 15:54:09 -0700 | [diff] [blame] | 3973 | auto& deviceMemoryInfo = deviceMemoryInfoIt->second; |
Lingfeng Yang | def88ba | 2018-12-13 12:43:17 -0800 | [diff] [blame] | 3974 | |
Jason Macnak | 24e20f8 | 2024-04-26 15:54:09 -0700 | [diff] [blame] | 3975 | if (deviceMemoryInfo.blobId && !deviceMemoryInfo.coherentMemory && |
| 3976 | !mCaps.params[kParamCreateGuestHandle]) { |
| 3977 | // NOTE: must not hold lock while calling into the encoder. |
| 3978 | lock.unlock(); |
Lingfeng Yang | 4af5f32 | 2019-02-14 08:10:28 -0800 | [diff] [blame] | 3979 | VkEncoder* enc = (VkEncoder*)context; |
Jason Macnak | 24e20f8 | 2024-04-26 15:54:09 -0700 | [diff] [blame] | 3980 | VkResult vkResult = enc->vkGetBlobGOOGLE(device, memory, /*doLock*/ false); |
| 3981 | if (vkResult != VK_SUCCESS) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 3982 | mesa_loge("%s: Failed to vkGetBlobGOOGLE().", __func__); |
Jason Macnak | 24e20f8 | 2024-04-26 15:54:09 -0700 | [diff] [blame] | 3983 | return vkResult; |
| 3984 | } |
| 3985 | lock.lock(); |
Lingfeng Yang | 3e87e85 | 2019-02-19 14:12:49 -0800 | [diff] [blame] | 3986 | |
Jason Macnak | 24e20f8 | 2024-04-26 15:54:09 -0700 | [diff] [blame] | 3987 | // NOTE: deviceMemoryInfoIt potentially invalidated but deviceMemoryInfo still okay. |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3988 | |
| 3989 | struct VirtGpuCreateBlob createBlob = {}; |
| 3990 | createBlob.blobMem = kBlobMemHost3d; |
| 3991 | createBlob.flags = kBlobFlagMappable; |
Jason Macnak | 24e20f8 | 2024-04-26 15:54:09 -0700 | [diff] [blame] | 3992 | createBlob.blobId = deviceMemoryInfo.blobId; |
| 3993 | createBlob.size = deviceMemoryInfo.coherentMemorySize; |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3994 | |
Jason Macnak | 24e20f8 | 2024-04-26 15:54:09 -0700 | [diff] [blame] | 3995 | auto blob = VirtGpuDevice::getInstance()->createBlob(createBlob); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3996 | if (!blob) return VK_ERROR_OUT_OF_DEVICE_MEMORY; |
| 3997 | |
Jason Macnak | 24e20f8 | 2024-04-26 15:54:09 -0700 | [diff] [blame] | 3998 | VirtGpuResourceMappingPtr mapping = blob->createMapping(); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 3999 | if (!mapping) return VK_ERROR_OUT_OF_DEVICE_MEMORY; |
| 4000 | |
| 4001 | auto coherentMemory = |
| 4002 | std::make_shared<CoherentMemory>(mapping, createBlob.size, device, memory); |
| 4003 | |
Jason Macnak | 24e20f8 | 2024-04-26 15:54:09 -0700 | [diff] [blame] | 4004 | uint8_t* ptr; |
| 4005 | uint64_t offset; |
| 4006 | coherentMemory->subAllocate(deviceMemoryInfo.allocationSize, &ptr, offset); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4007 | |
Jason Macnak | 24e20f8 | 2024-04-26 15:54:09 -0700 | [diff] [blame] | 4008 | deviceMemoryInfo.coherentMemoryOffset = offset; |
| 4009 | deviceMemoryInfo.coherentMemory = coherentMemory; |
| 4010 | deviceMemoryInfo.ptr = ptr; |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4011 | } |
| 4012 | |
Jason Macnak | 24e20f8 | 2024-04-26 15:54:09 -0700 | [diff] [blame] | 4013 | if (!deviceMemoryInfo.ptr) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 4014 | mesa_loge("%s: VkDeviceMemory has nullptr.", __func__); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4015 | return VK_ERROR_MEMORY_MAP_FAILED; |
| 4016 | } |
| 4017 | |
Jason Macnak | 24e20f8 | 2024-04-26 15:54:09 -0700 | [diff] [blame] | 4018 | if (size != VK_WHOLE_SIZE && (deviceMemoryInfo.ptr + offset + size > |
| 4019 | deviceMemoryInfo.ptr + deviceMemoryInfo.allocationSize)) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 4020 | mesa_loge( |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4021 | "%s: size is too big. alloc size 0x%llx while we wanted offset 0x%llx size 0x%llx " |
Jason Macnak | 24e20f8 | 2024-04-26 15:54:09 -0700 | [diff] [blame] | 4022 | "total 0x%llx", |
| 4023 | __func__, (unsigned long long)deviceMemoryInfo.allocationSize, |
| 4024 | (unsigned long long)offset, (unsigned long long)size, (unsigned long long)offset); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4025 | return VK_ERROR_MEMORY_MAP_FAILED; |
| 4026 | } |
| 4027 | |
Jason Macnak | 24e20f8 | 2024-04-26 15:54:09 -0700 | [diff] [blame] | 4028 | *ppData = deviceMemoryInfo.ptr + offset; |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4029 | |
| 4030 | return host_result; |
| 4031 | } |
| 4032 | |
| 4033 | void ResourceTracker::on_vkUnmapMemory(void*, VkDevice, VkDeviceMemory) { |
| 4034 | // no-op |
| 4035 | } |
| 4036 | |
| 4037 | void ResourceTracker::transformImageMemoryRequirements2ForGuest(VkImage image, |
| 4038 | VkMemoryRequirements2* reqs2) { |
| 4039 | AutoLock<RecursiveLock> lock(mLock); |
| 4040 | |
| 4041 | auto it = info_VkImage.find(image); |
| 4042 | if (it == info_VkImage.end()) return; |
| 4043 | |
| 4044 | auto& info = it->second; |
| 4045 | |
| 4046 | if (!info.external || !info.externalCreateInfo.handleTypes) { |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 4047 | transformImageMemoryRequirementsForGuestLocked(image, &reqs2->memoryRequirements); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4048 | return; |
| 4049 | } |
| 4050 | |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 4051 | transformImageMemoryRequirementsForGuestLocked(image, &reqs2->memoryRequirements); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4052 | |
| 4053 | VkMemoryDedicatedRequirements* dedicatedReqs = |
| 4054 | vk_find_struct<VkMemoryDedicatedRequirements>(reqs2); |
| 4055 | |
| 4056 | if (!dedicatedReqs) return; |
| 4057 | |
| 4058 | transformExternalResourceMemoryDedicatedRequirementsForGuest(dedicatedReqs); |
| 4059 | } |
| 4060 | |
| 4061 | void ResourceTracker::transformBufferMemoryRequirements2ForGuest(VkBuffer buffer, |
| 4062 | VkMemoryRequirements2* reqs2) { |
| 4063 | AutoLock<RecursiveLock> lock(mLock); |
| 4064 | |
| 4065 | auto it = info_VkBuffer.find(buffer); |
| 4066 | if (it == info_VkBuffer.end()) return; |
| 4067 | |
| 4068 | auto& info = it->second; |
| 4069 | |
| 4070 | if (!info.external || !info.externalCreateInfo.handleTypes) { |
| 4071 | return; |
| 4072 | } |
| 4073 | |
| 4074 | VkMemoryDedicatedRequirements* dedicatedReqs = |
| 4075 | vk_find_struct<VkMemoryDedicatedRequirements>(reqs2); |
| 4076 | |
| 4077 | if (!dedicatedReqs) return; |
| 4078 | |
| 4079 | transformExternalResourceMemoryDedicatedRequirementsForGuest(dedicatedReqs); |
| 4080 | } |
| 4081 | |
// Creates a VkImage on the host on behalf of the guest.
//
// Builds a host-safe copy of |pCreateInfo| (re-appending or translating
// platform-specific pNext structs), issues the creation through the encoder —
// optionally fetching memory requirements in the same round trip — and then
// records bookkeeping for the new image in info_VkImage.
//
// Returns the host's VkResult, or an early error for invalid external
// formats / unsupported WSI configurations.
VkResult ResourceTracker::on_vkCreateImage(void* context, VkResult, VkDevice device,
                                           const VkImageCreateInfo* pCreateInfo,
                                           const VkAllocationCallbacks* pAllocator,
                                           VkImage* pImage) {
    VkEncoder* enc = (VkEncoder*)context;

    // Orphan copy: detach from the caller's pNext chain so we control exactly
    // which extension structs travel to the host.
    VkImageCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
    // Queue family indices are only meaningful for CONCURRENT sharing; drop
    // them otherwise so stale pointers are not encoded.
    if (localCreateInfo.sharingMode != VK_SHARING_MODE_CONCURRENT) {
        localCreateInfo.queueFamilyIndexCount = 0;
        localCreateInfo.pQueueFamilyIndices = nullptr;
    }

    vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
    VkExternalMemoryImageCreateInfo localExtImgCi;

    const VkExternalMemoryImageCreateInfo* extImgCiPtr =
        vk_find_struct<VkExternalMemoryImageCreateInfo>(pCreateInfo);

    // External memory info is forwarded to the host as-is.
    if (extImgCiPtr) {
        localExtImgCi = vk_make_orphan_copy(*extImgCiPtr);
        vk_append_struct(&structChainIter, &localExtImgCi);
    }

#if defined(LINUX_GUEST_BUILD)
    // Linux: detect DMA-buf-backed images and massage WSI swapchain images
    // (scanout-only, linear tiling, no ALIAS bit) before sending to the host.
    bool isDmaBufImage = false;
    if (extImgCiPtr &&
        (extImgCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)) {
        const wsi_image_create_info* wsiImageCi =
            vk_find_struct<wsi_image_create_info>(pCreateInfo);
        if (wsiImageCi) {
            if (!wsiImageCi->scanout) {
                mesa_logd(
                    "gfxstream only supports native DRM image scanout path for Linux WSI "
                    "(wsi_image_create_info::scanout)");
                return VK_ERROR_INITIALIZATION_FAILED;
            }
            // Linux WSI creates swapchain images with VK_IMAGE_CREATE_ALIAS_BIT. Vulkan spec
            // states: "If the pNext chain includes a VkExternalMemoryImageCreateInfo or
            // VkExternalMemoryImageCreateInfoNV structure whose handleTypes member is not 0, it is
            // as if VK_IMAGE_CREATE_ALIAS_BIT is set." To avoid flag mismatches on host driver,
            // remove the VK_IMAGE_CREATE_ALIAS_BIT here.
            localCreateInfo.flags &= ~VK_IMAGE_CREATE_ALIAS_BIT;
            // TODO (b/326956485): DRM format modifiers to support client/compositor awareness
            // For now, override WSI images to use linear tiling, as compositor will default to
            // DRM_FORMAT_MOD_LINEAR.
            localCreateInfo.tiling = VK_IMAGE_TILING_LINEAR;
        }
        isDmaBufImage = true;
    }
#endif

#ifdef VK_USE_PLATFORM_ANDROID_KHR
    // Android: forward the gralloc native-buffer info to the host.
    VkNativeBufferANDROID localAnb;
    const VkNativeBufferANDROID* anbInfoPtr = vk_find_struct<VkNativeBufferANDROID>(pCreateInfo);
    if (anbInfoPtr) {
        localAnb = vk_make_orphan_copy(*anbInfoPtr);
        vk_append_struct(&structChainIter, &localAnb);
    }

    VkExternalFormatANDROID localExtFormatAndroid;
    const VkExternalFormatANDROID* extFormatAndroidPtr =
        vk_find_struct<VkExternalFormatANDROID>(pCreateInfo);
    if (extFormatAndroidPtr) {
        localExtFormatAndroid = vk_make_orphan_copy(*extFormatAndroidPtr);

        // Do not append external format android;
        // instead, replace the local image localCreateInfo format
        // with the corresponding Vulkan format
        if (extFormatAndroidPtr->externalFormat) {
            localCreateInfo.format = vk_format_from_fourcc(extFormatAndroidPtr->externalFormat);
            if (localCreateInfo.format == VK_FORMAT_UNDEFINED)
                return VK_ERROR_VALIDATION_FAILED_EXT;
        }
    }
#endif

#ifdef VK_USE_PLATFORM_FUCHSIA
    // Fuchsia: images may be backed by sysmem buffer collections. If so,
    // ensure a goldfish ColorBuffer exists for the collection's VMO before the
    // host image is created.
    const VkBufferCollectionImageCreateInfoFUCHSIA* extBufferCollectionPtr =
        vk_find_struct<VkBufferCollectionImageCreateInfoFUCHSIA>(pCreateInfo);

    bool isSysmemBackedMemory = false;

    if (extImgCiPtr &&
        (extImgCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA)) {
        isSysmemBackedMemory = true;
    }

    if (extBufferCollectionPtr) {
        const auto& collection =
            *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
                extBufferCollectionPtr->collection);
        uint32_t index = extBufferCollectionPtr->index;
        zx::vmo vmo;

        fuchsia_sysmem::wire::BufferCollectionInfo2 info;

        // Block until sysmem has allocated the collection, then grab the VMO
        // backing the requested buffer index.
        auto result = collection->WaitForBuffersAllocated();
        if (result.ok() && result->status == ZX_OK) {
            info = std::move(result->buffer_collection_info);
            if (index < info.buffer_count && info.settings.has_image_format_constraints) {
                vmo = std::move(info.buffers[index].vmo);
            }
        } else {
            mesa_loge("WaitForBuffersAllocated failed: %d %d", result.status(),
                      GET_STATUS_SAFE(result, status));
        }

        if (vmo.is_valid()) {
            zx::vmo vmo_dup;
            if (zx_status_t status = vmo.duplicate(ZX_RIGHT_SAME_RIGHTS, &vmo_dup);
                status != ZX_OK) {
                mesa_loge("%s: zx_vmo_duplicate failed: %d", __func__, status);
                abort();
            }

            // Look up (by duplicated VMO) whether the control device already
            // has a buffer handle for this memory.
            auto buffer_handle_result = mControlDevice->GetBufferHandle(std::move(vmo_dup));
            if (!buffer_handle_result.ok()) {
                mesa_loge("%s: GetBufferHandle FIDL error: %d", __func__,
                          buffer_handle_result.status());
                abort();
            }
            if (buffer_handle_result.value().res == ZX_OK) {
                // Buffer handle already exists.
                // If it is a ColorBuffer, no-op; Otherwise return error.
                if (buffer_handle_result.value().type !=
                    fuchsia_hardware_goldfish::wire::BufferHandleType::kColorBuffer) {
                    mesa_loge("%s: BufferHandle %u is not a ColorBuffer", __func__,
                              buffer_handle_result.value().id);
                    return VK_ERROR_OUT_OF_HOST_MEMORY;
                }
            } else if (buffer_handle_result.value().res == ZX_ERR_NOT_FOUND) {
                // Buffer handle not found. Create ColorBuffer based on buffer settings.
                auto format = info.settings.image_format_constraints.pixel_format.type ==
                                      fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8
                                  ? fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kRgba
                                  : fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kBgra;

                uint32_t memory_property =
                    info.settings.buffer_settings.heap ==
                            fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal
                        ? fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal
                        : fuchsia_hardware_goldfish::wire::kMemoryPropertyHostVisible;

                fidl::Arena arena;
                fuchsia_hardware_goldfish::wire::CreateColorBuffer2Params createParams(arena);
                createParams.set_width(info.settings.image_format_constraints.min_coded_width)
                    .set_height(info.settings.image_format_constraints.min_coded_height)
                    .set_format(format)
                    .set_memory_property(memory_property);

                // NOTE: this `result` intentionally shadows the outer
                // WaitForBuffersAllocated result within this branch.
                auto result =
                    mControlDevice->CreateColorBuffer2(std::move(vmo), std::move(createParams));
                if (result.ok() && result->res == ZX_ERR_ALREADY_EXISTS) {
                    mesa_logd("CreateColorBuffer: color buffer already exists\n");
                } else if (!result.ok() || result->res != ZX_OK) {
                    mesa_loge("CreateColorBuffer failed: %d:%d", result.status(),
                              GET_STATUS_SAFE(result, res));
                }
            }

            if (info.settings.buffer_settings.heap ==
                fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible) {
                mesa_logd(
                    "%s: Image uses host visible memory heap; set tiling "
                    "to linear to match host ImageCreateInfo",
                    __func__);
                localCreateInfo.tiling = VK_IMAGE_TILING_LINEAR;
            }
        }
        isSysmemBackedMemory = true;
    }

    if (isSysmemBackedMemory) {
        localCreateInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
    }
#endif

    VkResult res;
    VkMemoryRequirements memReqs;

    // Prefer the combined create+requirements call when the host supports it:
    // it saves a round trip and lets us cache the base requirements below.
    if (supportsCreateResourcesWithRequirements()) {
        res = enc->vkCreateImageWithRequirementsGOOGLE(device, &localCreateInfo, pAllocator, pImage,
                                                       &memReqs, true /* do lock */);
    } else {
        res = enc->vkCreateImage(device, &localCreateInfo, pAllocator, pImage, true /* do lock */);
    }

    if (res != VK_SUCCESS) return res;

    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkImage.find(*pImage);
    if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;

    auto& info = it->second;

    // Record the caller's create info (pNext cleared: the chain's pointers do
    // not outlive this call).
    info.device = device;
    info.createInfo = *pCreateInfo;
    info.createInfo.pNext = nullptr;

#ifdef VK_USE_PLATFORM_ANDROID_KHR
    if (extFormatAndroidPtr && extFormatAndroidPtr->externalFormat) {
        info.hasExternalFormat = true;
        info.externalFourccFormat = extFormatAndroidPtr->externalFormat;
    }
#endif  // VK_USE_PLATFORM_ANDROID_KHR

    if (supportsCreateResourcesWithRequirements()) {
        info.baseRequirementsKnown = true;
    }

    if (extImgCiPtr) {
        info.external = true;
        info.externalCreateInfo = *extImgCiPtr;
    }

#ifdef VK_USE_PLATFORM_FUCHSIA
    if (isSysmemBackedMemory) {
        info.isSysmemBackedMemory = true;
    }
#endif

    // TODO: delete the `protocolVersion` check once goldfish drivers are gone.
#if defined(VK_USE_PLATFORM_ANDROID_KHR)
    // Lazily resolve the memory type index used for color buffers, then widen
    // the reported memory type bits for AHardwareBuffer-backed images.
    if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
        mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
    }
    if ((extImgCiPtr && (extImgCiPtr->handleTypes &
                         VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID))) {
        updateMemoryTypeBits(&memReqs.memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);
    }
#endif
#if defined(LINUX_GUEST_BUILD)
    if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
        mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
    }
    info.isDmaBufImage = isDmaBufImage;
    if (info.isDmaBufImage) {
        updateMemoryTypeBits(&memReqs.memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);
        if (localCreateInfo.tiling == VK_IMAGE_TILING_OPTIMAL) {
            // Linux WSI calls vkGetImageSubresourceLayout() to query the stride for swapchain
            // support. Similarly, stride is also queried from vkGetImageSubresourceLayout() to
            // determine the stride for colorBuffer resource creation (guest-side dmabuf resource).
            // To satisfy valid usage of this API, must call on the linearPeerImage for the VkImage
            // in question. As long as these two use cases match, the rowPitch won't actually be
            // used by WSI.
            VkImageCreateInfo linearPeerImageCreateInfo = {
                .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
                .pNext = nullptr,
                .flags = {},
                .imageType = VK_IMAGE_TYPE_2D,
                .format = localCreateInfo.format,
                .extent = localCreateInfo.extent,
                .mipLevels = 1,
                .arrayLayers = 1,
                .samples = VK_SAMPLE_COUNT_1_BIT,
                .tiling = VK_IMAGE_TILING_LINEAR,
                .usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
                .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
                .queueFamilyIndexCount = 0,
                .pQueueFamilyIndices = nullptr,
                .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
            };
            res = enc->vkCreateImage(device, &linearPeerImageCreateInfo, pAllocator,
                                     &info.linearPeerImage, true /* do lock */);
            if (res != VK_SUCCESS) return res;
        }
    }
#endif

    // Cache base requirements (already transformed for guest view) so later
    // vkGetImageMemoryRequirements calls can be answered locally.
    if (info.baseRequirementsKnown) {
        transformImageMemoryRequirementsForGuestLocked(*pImage, &memReqs);
        info.baseRequirements = memReqs;
    }
    return res;
}
Lingfeng Yang | 4af5f32 | 2019-02-14 08:10:28 -0800 | [diff] [blame] | 4358 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4359 | VkResult ResourceTracker::on_vkCreateSamplerYcbcrConversion( |
| 4360 | void* context, VkResult, VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, |
| 4361 | const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion) { |
| 4362 | VkSamplerYcbcrConversionCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo); |
Lingfeng Yang | 5c70112 | 2019-03-05 08:34:46 -0800 | [diff] [blame] | 4363 | |
| 4364 | #ifdef VK_USE_PLATFORM_ANDROID_KHR |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4365 | const VkExternalFormatANDROID* extFormatAndroidPtr = |
| 4366 | vk_find_struct<VkExternalFormatANDROID>(pCreateInfo); |
| 4367 | if (extFormatAndroidPtr) { |
Sergiu | ad91847 | 2024-05-21 16:28:45 +0100 | [diff] [blame] | 4368 | if (extFormatAndroidPtr->externalFormat == DRM_FORMAT_RGB565) { |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4369 | // We don't support external formats on host and it causes RGB565 |
| 4370 | // to fail in CtsGraphicsTestCases android.graphics.cts.BasicVulkanGpuTest |
| 4371 | // when passed as an external format. |
| 4372 | // We may consider doing this for all external formats. |
| 4373 | // See b/134771579. |
| 4374 | *pYcbcrConversion = VK_YCBCR_CONVERSION_DO_NOTHING; |
| 4375 | return VK_SUCCESS; |
| 4376 | } else if (extFormatAndroidPtr->externalFormat) { |
Sergiu | ad91847 | 2024-05-21 16:28:45 +0100 | [diff] [blame] | 4377 | localCreateInfo.format = vk_format_from_fourcc(extFormatAndroidPtr->externalFormat); |
Lingfeng Yang | 5c70112 | 2019-03-05 08:34:46 -0800 | [diff] [blame] | 4378 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4379 | } |
Lingfeng Yang | 5c70112 | 2019-03-05 08:34:46 -0800 | [diff] [blame] | 4380 | #endif |
| 4381 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4382 | VkEncoder* enc = (VkEncoder*)context; |
| 4383 | VkResult res = enc->vkCreateSamplerYcbcrConversion(device, &localCreateInfo, pAllocator, |
| 4384 | pYcbcrConversion, true /* do lock */); |
Roman Kiryanov | 004f48c | 2019-06-28 14:59:32 -0700 | [diff] [blame] | 4385 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4386 | if (*pYcbcrConversion == VK_YCBCR_CONVERSION_DO_NOTHING) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 4387 | mesa_loge( |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4388 | "FATAL: vkCreateSamplerYcbcrConversion returned a reserved value " |
| 4389 | "(VK_YCBCR_CONVERSION_DO_NOTHING)"); |
| 4390 | abort(); |
Roman Kiryanov | 004f48c | 2019-06-28 14:59:32 -0700 | [diff] [blame] | 4391 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4392 | return res; |
| 4393 | } |
Roman Kiryanov | 004f48c | 2019-06-28 14:59:32 -0700 | [diff] [blame] | 4394 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4395 | void ResourceTracker::on_vkDestroySamplerYcbcrConversion(void* context, VkDevice device, |
| 4396 | VkSamplerYcbcrConversion ycbcrConversion, |
| 4397 | const VkAllocationCallbacks* pAllocator) { |
| 4398 | VkEncoder* enc = (VkEncoder*)context; |
| 4399 | if (ycbcrConversion != VK_YCBCR_CONVERSION_DO_NOTHING) { |
| 4400 | enc->vkDestroySamplerYcbcrConversion(device, ycbcrConversion, pAllocator, |
| 4401 | true /* do lock */); |
Lingfeng Yang | 5c70112 | 2019-03-05 08:34:46 -0800 | [diff] [blame] | 4402 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4403 | } |
Lingfeng Yang | 5c70112 | 2019-03-05 08:34:46 -0800 | [diff] [blame] | 4404 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4405 | VkResult ResourceTracker::on_vkCreateSamplerYcbcrConversionKHR( |
| 4406 | void* context, VkResult, VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, |
| 4407 | const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion) { |
| 4408 | VkSamplerYcbcrConversionCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo); |
Lingfeng Yang | 5c70112 | 2019-03-05 08:34:46 -0800 | [diff] [blame] | 4409 | |
Gurchetan Singh | 6c5a79d | 2022-02-10 11:47:59 -0800 | [diff] [blame] | 4410 | #if defined(VK_USE_PLATFORM_ANDROID_KHR) |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4411 | const VkExternalFormatANDROID* extFormatAndroidPtr = |
| 4412 | vk_find_struct<VkExternalFormatANDROID>(pCreateInfo); |
| 4413 | if (extFormatAndroidPtr) { |
Sergiu | ad91847 | 2024-05-21 16:28:45 +0100 | [diff] [blame] | 4414 | if (extFormatAndroidPtr->externalFormat == DRM_FORMAT_RGB565) { |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4415 | // We don't support external formats on host and it causes RGB565 |
| 4416 | // to fail in CtsGraphicsTestCases android.graphics.cts.BasicVulkanGpuTest |
| 4417 | // when passed as an external format. |
| 4418 | // We may consider doing this for all external formats. |
| 4419 | // See b/134771579. |
| 4420 | *pYcbcrConversion = VK_YCBCR_CONVERSION_DO_NOTHING; |
| 4421 | return VK_SUCCESS; |
| 4422 | } else if (extFormatAndroidPtr->externalFormat) { |
Sergiu | ad91847 | 2024-05-21 16:28:45 +0100 | [diff] [blame] | 4423 | localCreateInfo.format = vk_format_from_fourcc(extFormatAndroidPtr->externalFormat); |
Lingfeng Yang | 5c70112 | 2019-03-05 08:34:46 -0800 | [diff] [blame] | 4424 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4425 | } |
Lingfeng Yang | 5c70112 | 2019-03-05 08:34:46 -0800 | [diff] [blame] | 4426 | #endif |
| 4427 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4428 | VkEncoder* enc = (VkEncoder*)context; |
| 4429 | VkResult res = enc->vkCreateSamplerYcbcrConversionKHR(device, &localCreateInfo, pAllocator, |
| 4430 | pYcbcrConversion, true /* do lock */); |
Roman Kiryanov | 004f48c | 2019-06-28 14:59:32 -0700 | [diff] [blame] | 4431 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4432 | if (*pYcbcrConversion == VK_YCBCR_CONVERSION_DO_NOTHING) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 4433 | mesa_loge( |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4434 | "FATAL: vkCreateSamplerYcbcrConversionKHR returned a reserved value " |
| 4435 | "(VK_YCBCR_CONVERSION_DO_NOTHING)"); |
| 4436 | abort(); |
Roman Kiryanov | 004f48c | 2019-06-28 14:59:32 -0700 | [diff] [blame] | 4437 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4438 | return res; |
| 4439 | } |
Roman Kiryanov | 004f48c | 2019-06-28 14:59:32 -0700 | [diff] [blame] | 4440 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4441 | void ResourceTracker::on_vkDestroySamplerYcbcrConversionKHR( |
| 4442 | void* context, VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, |
| 4443 | const VkAllocationCallbacks* pAllocator) { |
| 4444 | VkEncoder* enc = (VkEncoder*)context; |
| 4445 | if (ycbcrConversion != VK_YCBCR_CONVERSION_DO_NOTHING) { |
| 4446 | enc->vkDestroySamplerYcbcrConversionKHR(device, ycbcrConversion, pAllocator, |
| 4447 | true /* do lock */); |
Roman Kiryanov | 004f48c | 2019-06-28 14:59:32 -0700 | [diff] [blame] | 4448 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4449 | } |
Roman Kiryanov | 004f48c | 2019-06-28 14:59:32 -0700 | [diff] [blame] | 4450 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4451 | VkResult ResourceTracker::on_vkCreateSampler(void* context, VkResult, VkDevice device, |
| 4452 | const VkSamplerCreateInfo* pCreateInfo, |
| 4453 | const VkAllocationCallbacks* pAllocator, |
| 4454 | VkSampler* pSampler) { |
| 4455 | VkSamplerCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo); |
| 4456 | vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo); |
Roman Kiryanov | 004f48c | 2019-06-28 14:59:32 -0700 | [diff] [blame] | 4457 | |
Yilong Li | 256f18f | 2020-06-01 23:41:22 +0000 | [diff] [blame] | 4458 | #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(VK_USE_PLATFORM_FUCHSIA) |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4459 | VkSamplerYcbcrConversionInfo localVkSamplerYcbcrConversionInfo; |
| 4460 | const VkSamplerYcbcrConversionInfo* samplerYcbcrConversionInfo = |
| 4461 | vk_find_struct<VkSamplerYcbcrConversionInfo>(pCreateInfo); |
| 4462 | if (samplerYcbcrConversionInfo) { |
| 4463 | if (samplerYcbcrConversionInfo->conversion != VK_YCBCR_CONVERSION_DO_NOTHING) { |
| 4464 | localVkSamplerYcbcrConversionInfo = vk_make_orphan_copy(*samplerYcbcrConversionInfo); |
| 4465 | vk_append_struct(&structChainIter, &localVkSamplerYcbcrConversionInfo); |
Roman Kiryanov | 004f48c | 2019-06-28 14:59:32 -0700 | [diff] [blame] | 4466 | } |
Lingfeng Yang | 5c70112 | 2019-03-05 08:34:46 -0800 | [diff] [blame] | 4467 | } |
| 4468 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4469 | VkSamplerCustomBorderColorCreateInfoEXT localVkSamplerCustomBorderColorCreateInfo; |
| 4470 | const VkSamplerCustomBorderColorCreateInfoEXT* samplerCustomBorderColorCreateInfo = |
| 4471 | vk_find_struct<VkSamplerCustomBorderColorCreateInfoEXT>(pCreateInfo); |
| 4472 | if (samplerCustomBorderColorCreateInfo) { |
| 4473 | localVkSamplerCustomBorderColorCreateInfo = |
| 4474 | vk_make_orphan_copy(*samplerCustomBorderColorCreateInfo); |
| 4475 | vk_append_struct(&structChainIter, &localVkSamplerCustomBorderColorCreateInfo); |
| 4476 | } |
| 4477 | #endif |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4478 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4479 | VkEncoder* enc = (VkEncoder*)context; |
| 4480 | return enc->vkCreateSampler(device, &localCreateInfo, pAllocator, pSampler, true /* do lock */); |
| 4481 | } |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4482 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4483 | void ResourceTracker::on_vkGetPhysicalDeviceExternalFenceProperties( |
| 4484 | void* context, VkPhysicalDevice physicalDevice, |
| 4485 | const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, |
| 4486 | VkExternalFenceProperties* pExternalFenceProperties) { |
| 4487 | (void)context; |
| 4488 | (void)physicalDevice; |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4489 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4490 | pExternalFenceProperties->exportFromImportedHandleTypes = 0; |
| 4491 | pExternalFenceProperties->compatibleHandleTypes = 0; |
| 4492 | pExternalFenceProperties->externalFenceFeatures = 0; |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4493 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4494 | bool syncFd = pExternalFenceInfo->handleType & VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT; |
| 4495 | |
| 4496 | if (!syncFd) { |
| 4497 | return; |
| 4498 | } |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4499 | |
Gurchetan Singh | 6c5a79d | 2022-02-10 11:47:59 -0800 | [diff] [blame] | 4500 | #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__) |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4501 | pExternalFenceProperties->exportFromImportedHandleTypes = |
| 4502 | VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT; |
| 4503 | pExternalFenceProperties->compatibleHandleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT; |
| 4504 | pExternalFenceProperties->externalFenceFeatures = |
| 4505 | VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT | VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT; |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4506 | #endif |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4507 | } |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4508 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4509 | void ResourceTracker::on_vkGetPhysicalDeviceExternalFencePropertiesKHR( |
| 4510 | void* context, VkPhysicalDevice physicalDevice, |
| 4511 | const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, |
| 4512 | VkExternalFenceProperties* pExternalFenceProperties) { |
| 4513 | on_vkGetPhysicalDeviceExternalFenceProperties(context, physicalDevice, pExternalFenceInfo, |
| 4514 | pExternalFenceProperties); |
| 4515 | } |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4516 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4517 | VkResult ResourceTracker::on_vkCreateFence(void* context, VkResult input_result, VkDevice device, |
| 4518 | const VkFenceCreateInfo* pCreateInfo, |
| 4519 | const VkAllocationCallbacks* pAllocator, |
| 4520 | VkFence* pFence) { |
| 4521 | VkEncoder* enc = (VkEncoder*)context; |
| 4522 | VkFenceCreateInfo finalCreateInfo = *pCreateInfo; |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4523 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4524 | const VkExportFenceCreateInfo* exportFenceInfoPtr = |
| 4525 | vk_find_struct<VkExportFenceCreateInfo>(pCreateInfo); |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4526 | |
Gurchetan Singh | 6c5a79d | 2022-02-10 11:47:59 -0800 | [diff] [blame] | 4527 | #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__) |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4528 | bool exportSyncFd = exportFenceInfoPtr && (exportFenceInfoPtr->handleTypes & |
| 4529 | VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT); |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4530 | #endif |
| 4531 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4532 | input_result = |
| 4533 | enc->vkCreateFence(device, &finalCreateInfo, pAllocator, pFence, true /* do lock */); |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4534 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4535 | if (input_result != VK_SUCCESS) return input_result; |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4536 | |
Gurchetan Singh | 6c5a79d | 2022-02-10 11:47:59 -0800 | [diff] [blame] | 4537 | #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__) |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4538 | if (exportSyncFd) { |
| 4539 | if (!mFeatureInfo->hasVirtioGpuNativeSync) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 4540 | mesa_logi("%s: ensure sync device\n", __func__); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4541 | ensureSyncDeviceFd(); |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4542 | } |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4543 | |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 4544 | mesa_logi("%s: getting fence info\n", __func__); |
Gurchetan Singh | 6c906de | 2021-10-21 17:09:00 -0700 | [diff] [blame] | 4545 | AutoLock<RecursiveLock> lock(mLock); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4546 | auto it = info_VkFence.find(*pFence); |
| 4547 | |
| 4548 | if (it == info_VkFence.end()) return VK_ERROR_INITIALIZATION_FAILED; |
| 4549 | |
| 4550 | auto& info = it->second; |
| 4551 | |
| 4552 | info.external = true; |
| 4553 | info.exportFenceCreateInfo = *exportFenceInfoPtr; |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 4554 | mesa_logi("%s: info set (fence still -1). fence: %p\n", __func__, (void*)(*pFence)); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4555 | // syncFd is still -1 because we expect user to explicitly |
| 4556 | // export it via vkGetFenceFdKHR |
| 4557 | } |
| 4558 | #endif |
| 4559 | |
| 4560 | return input_result; |
| 4561 | } |
| 4562 | |
// Destroys a fence on the host via the encoder. NOTE(review): any guest-side
// tracked state for this fence (e.g. an imported sync fd) is presumably
// released by the generated unregister/cleanup path, not here — confirm.
void ResourceTracker::on_vkDestroyFence(void* context, VkDevice device, VkFence fence,
                                        const VkAllocationCallbacks* pAllocator) {
    VkEncoder* enc = (VkEncoder*)context;
    enc->vkDestroyFence(device, fence, pAllocator, true /* do lock */);
}
| 4568 | |
| 4569 | VkResult ResourceTracker::on_vkResetFences(void* context, VkResult, VkDevice device, |
| 4570 | uint32_t fenceCount, const VkFence* pFences) { |
| 4571 | VkEncoder* enc = (VkEncoder*)context; |
| 4572 | VkResult res = enc->vkResetFences(device, fenceCount, pFences, true /* do lock */); |
| 4573 | |
| 4574 | if (res != VK_SUCCESS) return res; |
| 4575 | |
| 4576 | if (!fenceCount) return res; |
| 4577 | |
| 4578 | // Permanence: temporary |
| 4579 | // on fence reset, close the fence fd |
| 4580 | // and act like we need to GetFenceFdKHR/ImportFenceFdKHR again |
| 4581 | AutoLock<RecursiveLock> lock(mLock); |
| 4582 | for (uint32_t i = 0; i < fenceCount; ++i) { |
| 4583 | VkFence fence = pFences[i]; |
| 4584 | auto it = info_VkFence.find(fence); |
| 4585 | auto& info = it->second; |
| 4586 | if (!info.external) continue; |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4587 | |
Gurchetan Singh | b7feebd | 2024-01-23 14:12:36 -0800 | [diff] [blame] | 4588 | #if GFXSTREAM_ENABLE_GUEST_GOLDFISH |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4589 | if (info.syncFd >= 0) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 4590 | mesa_logi("%s: resetting fence. make fd -1\n", __func__); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4591 | goldfish_sync_signal(info.syncFd); |
| 4592 | auto* syncHelper = |
| 4593 | ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper(); |
| 4594 | syncHelper->close(info.syncFd); |
| 4595 | info.syncFd = -1; |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4596 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4597 | #endif |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4598 | } |
| 4599 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4600 | return res; |
| 4601 | } |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4602 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4603 | VkResult ResourceTracker::on_vkImportFenceFdKHR(void* context, VkResult, VkDevice device, |
| 4604 | const VkImportFenceFdInfoKHR* pImportFenceFdInfo) { |
| 4605 | (void)context; |
| 4606 | (void)device; |
| 4607 | (void)pImportFenceFdInfo; |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4608 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4609 | // Transference: copy |
| 4610 | // meaning dup() the incoming fd |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4611 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4612 | VkEncoder* enc = (VkEncoder*)context; |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4613 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4614 | bool hasFence = pImportFenceFdInfo->fence != VK_NULL_HANDLE; |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4615 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4616 | if (!hasFence) return VK_ERROR_OUT_OF_HOST_MEMORY; |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4617 | |
Gurchetan Singh | 6c5a79d | 2022-02-10 11:47:59 -0800 | [diff] [blame] | 4618 | #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__) |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4619 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4620 | bool syncFdImport = pImportFenceFdInfo->handleType & VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT; |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4621 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4622 | if (!syncFdImport) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 4623 | mesa_logi("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd import\n", __func__); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4624 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
| 4625 | } |
Lingfeng Yang | cd2d8fe | 2019-08-16 12:21:50 -0700 | [diff] [blame] | 4626 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4627 | AutoLock<RecursiveLock> lock(mLock); |
| 4628 | auto it = info_VkFence.find(pImportFenceFdInfo->fence); |
| 4629 | if (it == info_VkFence.end()) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 4630 | mesa_logi("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence info\n", __func__); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4631 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
| 4632 | } |
| 4633 | |
| 4634 | auto& info = it->second; |
| 4635 | |
| 4636 | auto* syncHelper = ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper(); |
Gurchetan Singh | b7feebd | 2024-01-23 14:12:36 -0800 | [diff] [blame] | 4637 | #if GFXSTREAM_ENABLE_GUEST_GOLDFISH |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4638 | if (info.syncFd >= 0) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 4639 | mesa_logi("%s: previous sync fd exists, close it\n", __func__); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4640 | goldfish_sync_signal(info.syncFd); |
| 4641 | syncHelper->close(info.syncFd); |
| 4642 | } |
Gurchetan Singh | b7feebd | 2024-01-23 14:12:36 -0800 | [diff] [blame] | 4643 | #endif |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4644 | |
| 4645 | if (pImportFenceFdInfo->fd < 0) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 4646 | mesa_logi("%s: import -1, set to -1 and exit\n", __func__); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4647 | info.syncFd = -1; |
| 4648 | } else { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 4649 | mesa_logi("%s: import actual fd, dup and close()\n", __func__); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4650 | info.syncFd = syncHelper->dup(pImportFenceFdInfo->fd); |
| 4651 | syncHelper->close(pImportFenceFdInfo->fd); |
| 4652 | } |
| 4653 | return VK_SUCCESS; |
| 4654 | #else |
| 4655 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
| 4656 | #endif |
| 4657 | } |
| 4658 | |
// Exports a sync fd from a fence (VK_KHR_external_fence_fd). The fence must
// have been created with sync-fd export; a fresh fd is produced even for an
// already-signaled fence (see comment below re: ANGLE). On success the
// guest-side tracked fd is relinquished (set to -1) — ownership of the
// returned fd passes to the caller.
VkResult ResourceTracker::on_vkGetFenceFdKHR(void* context, VkResult, VkDevice device,
                                             const VkFenceGetFdInfoKHR* pGetFdInfo, int* pFd) {
    // export operation.
    // first check if fence is signaled
    // then if so, return -1
    // else, queue work

    VkEncoder* enc = (VkEncoder*)context;

    bool hasFence = pGetFdInfo->fence != VK_NULL_HANDLE;

    if (!hasFence) {
        mesa_logi("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence\n", __func__);
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    // Only sync fd export is supported.
    bool syncFdExport = pGetFdInfo->handleType & VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;

    if (!syncFdExport) {
        mesa_logi("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd fence\n", __func__);
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    VkResult currentFenceStatus =
        enc->vkGetFenceStatus(device, pGetFdInfo->fence, true /* do lock */);

    if (VK_ERROR_DEVICE_LOST == currentFenceStatus) {  // Other error
        mesa_logi("%s: VK_ERROR_DEVICE_LOST: Other error\n", __func__);
        *pFd = -1;
        return VK_ERROR_DEVICE_LOST;
    }

    if (VK_NOT_READY == currentFenceStatus || VK_SUCCESS == currentFenceStatus) {
        // Fence is valid. We also create a new sync fd for a signaled
        // fence, because ANGLE will use the returned fd directly to
        // implement eglDupNativeFenceFDANDROID, where -1 is only returned
        // when error occurs.
        AutoLock<RecursiveLock> lock(mLock);

        auto it = info_VkFence.find(pGetFdInfo->fence);
        if (it == info_VkFence.end()) {
            mesa_logi("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence info\n", __func__);
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }

        auto& info = it->second;

        // Export requires that the fence was created with sync-fd export
        // (recorded at vkCreateFence time).
        bool syncFdCreated = info.external && (info.exportFenceCreateInfo.handleTypes &
                                               VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT);

        if (!syncFdCreated) {
            mesa_logi("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd created\n", __func__);
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }

        if (mFeatureInfo->hasVirtioGpuNativeSync) {
            // Native sync path: ask virtio-gpu to mint an fd bound to the
            // host-side fence handle.
            VkResult result;
            int64_t osHandle;
            uint64_t hostFenceHandle = get_host_u64_VkFence(pGetFdInfo->fence);

            result = createFence(device, hostFenceHandle, osHandle);
            if (result != VK_SUCCESS) return result;

            // NOTE(review): narrowing int64_t -> int; presumably the handle
            // is always a small fd — confirm.
            *pFd = osHandle;
        } else {
#if GFXSTREAM_ENABLE_GUEST_GOLDFISH
            // Legacy path: queue work on the goldfish sync device, which
            // fills *pFd with a fresh sync fd.
            goldfish_sync_queue_work(
                mSyncDeviceFd, get_host_u64_VkFence(pGetFdInfo->fence) /* the handle */,
                GOLDFISH_SYNC_VULKAN_SEMAPHORE_SYNC /* thread handle (doubling as type field) */,
                pFd);
#endif
        }

        // relinquish ownership
        info.syncFd = -1;
        mesa_logi("%s: got fd: %d\n", __func__, *pFd);
        return VK_SUCCESS;
    }
    return VK_ERROR_DEVICE_LOST;
#else
    return VK_ERROR_OUT_OF_HOST_MEMORY;
#endif
}
| 4743 | |
// Waits on a mix of external (sync-fd backed) and plain fences. Plain fences
// are waited on by the host driver; external fences are waited on guest-side
// via their sync fds. When both kinds are present, all waits are scheduled as
// work-pool tasks and joined with waitAll/waitAny semantics.
VkResult ResourceTracker::on_vkWaitForFences(void* context, VkResult, VkDevice device,
                                             uint32_t fenceCount, const VkFence* pFences,
                                             VkBool32 waitAll, uint64_t timeout) {
    VkEncoder* enc = (VkEncoder*)context;

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    std::vector<VkFence> fencesExternal;
    std::vector<int> fencesExternalWaitFds;
    std::vector<VkFence> fencesNonExternal;

    // Partition the fences while holding the lock; release it before any
    // blocking wait below.
    AutoLock<RecursiveLock> lock(mLock);

    for (uint32_t i = 0; i < fenceCount; ++i) {
        auto it = info_VkFence.find(pFences[i]);
        if (it == info_VkFence.end()) continue;
        const auto& info = it->second;
        if (info.syncFd >= 0) {
            fencesExternal.push_back(pFences[i]);
            fencesExternalWaitFds.push_back(info.syncFd);
        } else {
            fencesNonExternal.push_back(pFences[i]);
        }
    }

    lock.unlock();

    if (fencesExternal.empty()) {
        // No need for work pool, just wait with host driver.
        return enc->vkWaitForFences(device, fenceCount, pFences, waitAll, timeout,
                                    true /* do lock */);
    } else {
        // Depending on wait any or wait all,
        // schedule a wait group with waitAny/waitAll
        std::vector<WorkPool::Task> tasks;

        mesa_logi("%s: scheduling ext waits\n", __func__);

        // One task per external fence: wait on its sync fd.
        // NOTE(review): the per-fd wait uses a fixed 3000 ms cap rather than
        // the caller's timeout; the overall timeout is enforced by the work
        // pool below — confirm this is intended.
        for (auto fd : fencesExternalWaitFds) {
            mesa_logi("%s: wait on %d\n", __func__, fd);
            tasks.push_back([fd] {
                auto* syncHelper =
                    ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
                syncHelper->wait(fd, 3000);
                mesa_logi("done waiting on fd %d\n", fd);
            });
        }

        // One task for all non-external fences: forward to the host driver on
        // a fresh encoder (this runs on a work-pool thread, not the caller's).
        if (!fencesNonExternal.empty()) {
            tasks.push_back(
                [this, fencesNonExternal /* copy of vector */, device, waitAll, timeout] {
                    auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
                    auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
                    mesa_logi("%s: vkWaitForFences to host\n", __func__);
                    vkEncoder->vkWaitForFences(device, fencesNonExternal.size(),
                                               fencesNonExternal.data(), waitAll, timeout,
                                               true /* do lock */);
                });
        }

        auto waitGroupHandle = mWorkPool.schedule(tasks);

        // Convert timeout to microseconds from nanoseconds
        bool waitRes = false;
        if (waitAll) {
            waitRes = mWorkPool.waitAll(waitGroupHandle, timeout / 1000);
        } else {
            waitRes = mWorkPool.waitAny(waitGroupHandle, timeout / 1000);
        }

        if (waitRes) {
            mesa_logi("%s: VK_SUCCESS\n", __func__);
            return VK_SUCCESS;
        } else {
            mesa_logi("%s: VK_TIMEOUT\n", __func__);
            return VK_TIMEOUT;
        }
    }
#else
    return enc->vkWaitForFences(device, fenceCount, pFences, waitAll, timeout, true /* do lock */);
#endif
}
| 4825 | |
| 4826 | VkResult ResourceTracker::on_vkCreateDescriptorPool(void* context, VkResult, VkDevice device, |
| 4827 | const VkDescriptorPoolCreateInfo* pCreateInfo, |
| 4828 | const VkAllocationCallbacks* pAllocator, |
| 4829 | VkDescriptorPool* pDescriptorPool) { |
| 4830 | VkEncoder* enc = (VkEncoder*)context; |
| 4831 | |
| 4832 | VkResult res = enc->vkCreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool, |
| 4833 | true /* do lock */); |
| 4834 | |
| 4835 | if (res != VK_SUCCESS) return res; |
| 4836 | |
| 4837 | VkDescriptorPool pool = *pDescriptorPool; |
| 4838 | |
| 4839 | struct goldfish_VkDescriptorPool* dp = as_goldfish_VkDescriptorPool(pool); |
| 4840 | dp->allocInfo = new DescriptorPoolAllocationInfo; |
| 4841 | dp->allocInfo->device = device; |
| 4842 | dp->allocInfo->createFlags = pCreateInfo->flags; |
| 4843 | dp->allocInfo->maxSets = pCreateInfo->maxSets; |
| 4844 | dp->allocInfo->usedSets = 0; |
| 4845 | |
| 4846 | for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; ++i) { |
| 4847 | dp->allocInfo->descriptorCountInfo.push_back({ |
| 4848 | pCreateInfo->pPoolSizes[i].type, pCreateInfo->pPoolSizes[i].descriptorCount, |
| 4849 | 0, /* used */ |
| 4850 | }); |
Lingfeng Yang | e9e77d5 | 2020-03-25 14:01:58 -0700 | [diff] [blame] | 4851 | } |
| 4852 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4853 | if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) { |
| 4854 | std::vector<uint64_t> poolIds(pCreateInfo->maxSets); |
Lingfeng Yang | 03354c7 | 2020-03-26 13:00:51 -0700 | [diff] [blame] | 4855 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4856 | uint32_t count = pCreateInfo->maxSets; |
| 4857 | enc->vkCollectDescriptorPoolIdsGOOGLE(device, pool, &count, poolIds.data(), |
| 4858 | true /* do lock */); |
Lingfeng Yang | 03354c7 | 2020-03-26 13:00:51 -0700 | [diff] [blame] | 4859 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4860 | dp->allocInfo->freePoolIds = poolIds; |
Lingfeng Yang | 03354c7 | 2020-03-26 13:00:51 -0700 | [diff] [blame] | 4861 | } |
| 4862 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4863 | return res; |
| 4864 | } |
Lingfeng Yang | 03354c7 | 2020-03-26 13:00:51 -0700 | [diff] [blame] | 4865 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4866 | void ResourceTracker::on_vkDestroyDescriptorPool(void* context, VkDevice device, |
| 4867 | VkDescriptorPool descriptorPool, |
| 4868 | const VkAllocationCallbacks* pAllocator) { |
| 4869 | if (!descriptorPool) return; |
Lingfeng Yang | 03354c7 | 2020-03-26 13:00:51 -0700 | [diff] [blame] | 4870 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4871 | VkEncoder* enc = (VkEncoder*)context; |
Lingfeng Yang | 03354c7 | 2020-03-26 13:00:51 -0700 | [diff] [blame] | 4872 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4873 | clearDescriptorPoolAndUnregisterDescriptorSets(context, device, descriptorPool); |
Lingfeng Yang | ffb94af | 2021-04-22 15:16:35 -0700 | [diff] [blame] | 4874 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4875 | enc->vkDestroyDescriptorPool(device, descriptorPool, pAllocator, true /* do lock */); |
| 4876 | } |
Lingfeng Yang | ffb94af | 2021-04-22 15:16:35 -0700 | [diff] [blame] | 4877 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4878 | VkResult ResourceTracker::on_vkResetDescriptorPool(void* context, VkResult, VkDevice device, |
| 4879 | VkDescriptorPool descriptorPool, |
| 4880 | VkDescriptorPoolResetFlags flags) { |
| 4881 | if (!descriptorPool) return VK_ERROR_INITIALIZATION_FAILED; |
| 4882 | |
| 4883 | VkEncoder* enc = (VkEncoder*)context; |
| 4884 | |
| 4885 | VkResult res = enc->vkResetDescriptorPool(device, descriptorPool, flags, true /* do lock */); |
| 4886 | |
| 4887 | if (res != VK_SUCCESS) return res; |
| 4888 | |
| 4889 | clearDescriptorPoolAndUnregisterDescriptorSets(context, device, descriptorPool); |
| 4890 | return res; |
| 4891 | } |
| 4892 | |
| 4893 | VkResult ResourceTracker::on_vkAllocateDescriptorSets( |
| 4894 | void* context, VkResult, VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, |
| 4895 | VkDescriptorSet* pDescriptorSets) { |
| 4896 | VkEncoder* enc = (VkEncoder*)context; |
| 4897 | auto ci = pAllocateInfo; |
| 4898 | auto sets = pDescriptorSets; |
| 4899 | if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) { |
| 4900 | // Using the pool ID's we collected earlier from the host |
| 4901 | VkResult poolAllocResult = validateAndApplyVirtualDescriptorSetAllocation(ci, sets); |
| 4902 | |
| 4903 | if (poolAllocResult != VK_SUCCESS) return poolAllocResult; |
| 4904 | |
| 4905 | for (uint32_t i = 0; i < ci->descriptorSetCount; ++i) { |
| 4906 | register_VkDescriptorSet(sets[i]); |
| 4907 | VkDescriptorSetLayout setLayout = |
| 4908 | as_goldfish_VkDescriptorSet(sets[i])->reified->setLayout; |
| 4909 | |
| 4910 | // Need to add ref to the set layout in the virtual case |
| 4911 | // because the set itself might not be realized on host at the |
| 4912 | // same time |
| 4913 | struct goldfish_VkDescriptorSetLayout* dsl = |
| 4914 | as_goldfish_VkDescriptorSetLayout(setLayout); |
| 4915 | ++dsl->layoutInfo->refcount; |
| 4916 | } |
| 4917 | } else { |
| 4918 | VkResult allocRes = enc->vkAllocateDescriptorSets(device, ci, sets, true /* do lock */); |
| 4919 | |
| 4920 | if (allocRes != VK_SUCCESS) return allocRes; |
| 4921 | |
| 4922 | for (uint32_t i = 0; i < ci->descriptorSetCount; ++i) { |
| 4923 | applyDescriptorSetAllocation(ci->descriptorPool, ci->pSetLayouts[i]); |
| 4924 | fillDescriptorSetInfoForPool(ci->descriptorPool, ci->pSetLayouts[i], sets[i]); |
| 4925 | } |
| 4926 | } |
| 4927 | |
| 4928 | return VK_SUCCESS; |
| 4929 | } |
| 4930 | |
| 4931 | VkResult ResourceTracker::on_vkFreeDescriptorSets(void* context, VkResult, VkDevice device, |
| 4932 | VkDescriptorPool descriptorPool, |
| 4933 | uint32_t descriptorSetCount, |
| 4934 | const VkDescriptorSet* pDescriptorSets) { |
| 4935 | VkEncoder* enc = (VkEncoder*)context; |
| 4936 | |
| 4937 | // Bit of robustness so that we can double free descriptor sets |
| 4938 | // and do other invalid usages |
| 4939 | // https://github.com/KhronosGroup/Vulkan-Docs/issues/1070 |
| 4940 | // (people expect VK_SUCCESS to always be returned by vkFreeDescriptorSets) |
| 4941 | std::vector<VkDescriptorSet> toActuallyFree; |
| 4942 | { |
| 4943 | AutoLock<RecursiveLock> lock(mLock); |
| 4944 | |
| 4945 | // Pool was destroyed |
| 4946 | if (info_VkDescriptorPool.find(descriptorPool) == info_VkDescriptorPool.end()) { |
| 4947 | return VK_SUCCESS; |
Lingfeng Yang | ffb94af | 2021-04-22 15:16:35 -0700 | [diff] [blame] | 4948 | } |
| 4949 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4950 | if (!descriptorPoolSupportsIndividualFreeLocked(descriptorPool)) return VK_SUCCESS; |
Lingfeng Yang | ffb94af | 2021-04-22 15:16:35 -0700 | [diff] [blame] | 4951 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4952 | std::vector<VkDescriptorSet> existingDescriptorSets; |
| 4953 | ; |
| 4954 | |
| 4955 | // Check if this descriptor set was in the pool's set of allocated descriptor sets, |
| 4956 | // to guard against double free (Double free is allowed by the client) |
| 4957 | { |
| 4958 | auto allocedSets = as_goldfish_VkDescriptorPool(descriptorPool)->allocInfo->allocedSets; |
| 4959 | |
| 4960 | for (uint32_t i = 0; i < descriptorSetCount; ++i) { |
| 4961 | if (allocedSets.end() == allocedSets.find(pDescriptorSets[i])) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 4962 | mesa_logi( |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 4963 | "%s: Warning: descriptor set %p not found in pool. Was this " |
| 4964 | "double-freed?\n", |
| 4965 | __func__, (void*)pDescriptorSets[i]); |
| 4966 | continue; |
| 4967 | } |
| 4968 | |
| 4969 | auto it = info_VkDescriptorSet.find(pDescriptorSets[i]); |
| 4970 | if (it == info_VkDescriptorSet.end()) continue; |
| 4971 | |
| 4972 | existingDescriptorSets.push_back(pDescriptorSets[i]); |
| 4973 | } |
| 4974 | } |
| 4975 | |
| 4976 | for (auto set : existingDescriptorSets) { |
| 4977 | if (removeDescriptorSetFromPool(set, |
| 4978 | mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate)) { |
| 4979 | toActuallyFree.push_back(set); |
| 4980 | } |
| 4981 | } |
| 4982 | |
| 4983 | if (toActuallyFree.empty()) return VK_SUCCESS; |
| 4984 | } |
| 4985 | |
| 4986 | if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) { |
| 4987 | // In the batched set update case, decrement refcount on the set layout |
| 4988 | // and only free on host if we satisfied a pending allocation on the |
| 4989 | // host. |
| 4990 | for (uint32_t i = 0; i < toActuallyFree.size(); ++i) { |
| 4991 | VkDescriptorSetLayout setLayout = |
| 4992 | as_goldfish_VkDescriptorSet(toActuallyFree[i])->reified->setLayout; |
| 4993 | decDescriptorSetLayoutRef(context, device, setLayout, nullptr); |
| 4994 | } |
| 4995 | freeDescriptorSetsIfHostAllocated(enc, device, (uint32_t)toActuallyFree.size(), |
| 4996 | toActuallyFree.data()); |
| 4997 | } else { |
| 4998 | // In the non-batched set update case, just free them directly. |
| 4999 | enc->vkFreeDescriptorSets(device, descriptorPool, (uint32_t)toActuallyFree.size(), |
| 5000 | toActuallyFree.data(), true /* do lock */); |
| 5001 | } |
| 5002 | return VK_SUCCESS; |
| 5003 | } |
| 5004 | |
| 5005 | VkResult ResourceTracker::on_vkCreateDescriptorSetLayout( |
| 5006 | void* context, VkResult, VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, |
| 5007 | const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout) { |
| 5008 | VkEncoder* enc = (VkEncoder*)context; |
| 5009 | |
| 5010 | VkResult res = enc->vkCreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout, |
| 5011 | true /* do lock */); |
| 5012 | |
| 5013 | if (res != VK_SUCCESS) return res; |
| 5014 | |
| 5015 | struct goldfish_VkDescriptorSetLayout* dsl = as_goldfish_VkDescriptorSetLayout(*pSetLayout); |
| 5016 | dsl->layoutInfo = new DescriptorSetLayoutInfo; |
| 5017 | for (uint32_t i = 0; i < pCreateInfo->bindingCount; ++i) { |
| 5018 | dsl->layoutInfo->bindings.push_back(pCreateInfo->pBindings[i]); |
| 5019 | } |
| 5020 | dsl->layoutInfo->refcount = 1; |
| 5021 | |
| 5022 | return res; |
| 5023 | } |
| 5024 | |
// Intercepts vkUpdateDescriptorSets so that image-info entries can be
// rewritten (immutable samplers zeroed, nonexistent samplers filtered) before
// the update is either emulated locally or forwarded to the host.
void ResourceTracker::on_vkUpdateDescriptorSets(void* context, VkDevice device,
                                                uint32_t descriptorWriteCount,
                                                const VkWriteDescriptorSet* pDescriptorWrites,
                                                uint32_t descriptorCopyCount,
                                                const VkCopyDescriptorSet* pDescriptorCopies) {
    VkEncoder* enc = (VkEncoder*)context;

    // Shallow-copy the write array; pImageInfo pointers inside the copies are
    // then rebased onto our own transformedImageInfos storage below.
    std::vector<VkDescriptorImageInfo> transformedImageInfos;
    std::vector<VkWriteDescriptorSet> transformedWrites(descriptorWriteCount);

    memcpy(transformedWrites.data(), pDescriptorWrites,
           sizeof(VkWriteDescriptorSet) * descriptorWriteCount);

    // Pass 1: count how many VkDescriptorImageInfo entries we need so the
    // vector can be sized once up front (resizing later would invalidate the
    // pointers handed out in pass 2).
    size_t imageInfosNeeded = 0;
    for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
        if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
        if (!transformedWrites[i].pImageInfo) continue;

        imageInfosNeeded += transformedWrites[i].descriptorCount;
    }

    transformedImageInfos.resize(imageInfosNeeded);

    // Pass 2: copy each write's image infos into the flat array and point the
    // write at its slice of that array.
    size_t imageInfoIndex = 0;
    for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
        if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
        if (!transformedWrites[i].pImageInfo) continue;

        for (uint32_t j = 0; j < transformedWrites[i].descriptorCount; ++j) {
            transformedImageInfos[imageInfoIndex] = transformedWrites[i].pImageInfo[j];
            ++imageInfoIndex;
        }
        transformedWrites[i].pImageInfo =
            &transformedImageInfos[imageInfoIndex - transformedWrites[i].descriptorCount];
    }

    {
        // Validate and filter samplers under the tracker lock: zero out the
        // sampler for bindings declared with immutable samplers, and replace
        // samplers that no longer exist (mirrors the walk order of pass 2, so
        // imageInfoIndex lines up with the same slots).
        AutoLock<RecursiveLock> lock(mLock);
        size_t imageInfoIndex = 0;
        for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
            if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
            if (!transformedWrites[i].pImageInfo) continue;

            bool isImmutableSampler = descriptorBindingIsImmutableSampler(
                transformedWrites[i].dstSet, transformedWrites[i].dstBinding);

            for (uint32_t j = 0; j < transformedWrites[i].descriptorCount; ++j) {
                if (isImmutableSampler) {
                    transformedImageInfos[imageInfoIndex].sampler = 0;
                }
                transformedImageInfos[imageInfoIndex] =
                    filterNonexistentSampler(transformedImageInfos[imageInfoIndex]);
                ++imageInfoIndex;
            }
        }
    }

    // Batched mode: apply writes/copies to the guest-side reified sets; the
    // host is updated elsewhere. Otherwise forward the transformed writes to
    // the host directly.
    if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
        for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
            VkDescriptorSet set = transformedWrites[i].dstSet;
            doEmulatedDescriptorWrite(&transformedWrites[i],
                                      as_goldfish_VkDescriptorSet(set)->reified);
        }

        for (uint32_t i = 0; i < descriptorCopyCount; ++i) {
            doEmulatedDescriptorCopy(
                &pDescriptorCopies[i],
                as_goldfish_VkDescriptorSet(pDescriptorCopies[i].srcSet)->reified,
                as_goldfish_VkDescriptorSet(pDescriptorCopies[i].dstSet)->reified);
        }
    } else {
        enc->vkUpdateDescriptorSets(device, descriptorWriteCount, transformedWrites.data(),
                                    descriptorCopyCount, pDescriptorCopies, true /* do lock */);
    }
}
| 5101 | |
// Destroys an image, first draining any pending QSRI sync fds (Android) and
// destroying the linear peer image (Linux guests) so host-side ordering
// hazards are avoided.
void ResourceTracker::on_vkDestroyImage(void* context, VkDevice device, VkImage image,
                                        const VkAllocationCallbacks* pAllocator) {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    auto* syncHelper = ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
    {
        AutoLock<RecursiveLock> lock(mLock);  // do not guard encoder may cause
                                              // deadlock b/243339973
        // Wait for any pending QSRIs to prevent a race between the Gfxstream host
        // potentially processing the below `vkDestroyImage()` from the VK encoder
        // command stream before processing a previously submitted
        // `VIRTIO_GPU_NATIVE_SYNC_VULKAN_QSRI_EXPORT` from the virtio-gpu command
        // stream which relies on the image existing.
        auto imageInfoIt = info_VkImage.find(image);
        if (imageInfoIt != info_VkImage.end()) {
            auto& imageInfo = imageInfoIt->second;
            for (int syncFd : imageInfo.pendingQsriSyncFds) {
                // Bounded 3s wait per fd; a failed wait is logged but does not
                // abort destruction.
                int syncWaitRet = syncHelper->wait(syncFd, 3000);
                if (syncWaitRet < 0) {
                    mesa_loge("%s: Failed to wait for pending QSRI sync: sterror: %s errno: %d",
                              __func__, strerror(errno), errno);
                }
                // Ownership of each fd ends here regardless of wait outcome.
                syncHelper->close(syncFd);
            }
            imageInfo.pendingQsriSyncFds.clear();
        }
    }
#endif
    VkEncoder* enc = (VkEncoder*)context;
#if defined(LINUX_GUEST_BUILD)
    // NOTE(review): info_VkImage is read here without holding mLock, unlike
    // the Android block above — confirm this is safe on this path.
    auto imageInfoIt = info_VkImage.find(image);
    if (imageInfoIt != info_VkImage.end()) {
        auto& imageInfo = imageInfoIt->second;
        if (imageInfo.linearPeerImage) {
            // Destroy the companion linear image created alongside this one.
            enc->vkDestroyImage(device, imageInfo.linearPeerImage, pAllocator, true /* do lock */);
        }
    }
#endif
    enc->vkDestroyImage(device, image, pAllocator, true /* do lock */);
}
| 5142 | |
// Returns cached memory requirements when available; otherwise queries the
// host once, rewrites the result for the guest, and caches it on the image.
void ResourceTracker::on_vkGetImageMemoryRequirements(void* context, VkDevice device, VkImage image,
                                                      VkMemoryRequirements* pMemoryRequirements) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkImage.find(image);
    if (it == info_VkImage.end()) return;

    auto& info = it->second;

    // Fast path: requirements were recorded by a prior call (or at creation).
    if (info.baseRequirementsKnown) {
        *pMemoryRequirements = info.baseRequirements;
        return;
    }

    // Drop the lock around the host round trip so other threads are not
    // blocked behind the encoder call.
    lock.unlock();

    VkEncoder* enc = (VkEncoder*)context;

    enc->vkGetImageMemoryRequirements(device, image, pMemoryRequirements, true /* do lock */);

    lock.lock();

    // NOTE(review): `info` is a reference obtained before the unlock; this
    // assumes the image entry is not erased concurrently — confirm.
    transformImageMemoryRequirementsForGuestLocked(image, pMemoryRequirements);

    // Cache the (guest-transformed) requirements for subsequent queries.
    info.baseRequirementsKnown = true;
    info.baseRequirements = *pMemoryRequirements;
}
| 5170 | |
| 5171 | void ResourceTracker::on_vkGetImageMemoryRequirements2(void* context, VkDevice device, |
| 5172 | const VkImageMemoryRequirementsInfo2* pInfo, |
| 5173 | VkMemoryRequirements2* pMemoryRequirements) { |
| 5174 | VkEncoder* enc = (VkEncoder*)context; |
| 5175 | enc->vkGetImageMemoryRequirements2(device, pInfo, pMemoryRequirements, true /* do lock */); |
| 5176 | transformImageMemoryRequirements2ForGuest(pInfo->image, pMemoryRequirements); |
| 5177 | } |
| 5178 | |
| 5179 | void ResourceTracker::on_vkGetImageMemoryRequirements2KHR( |
| 5180 | void* context, VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, |
| 5181 | VkMemoryRequirements2* pMemoryRequirements) { |
| 5182 | VkEncoder* enc = (VkEncoder*)context; |
| 5183 | enc->vkGetImageMemoryRequirements2KHR(device, pInfo, pMemoryRequirements, true /* do lock */); |
| 5184 | transformImageMemoryRequirements2ForGuest(pInfo->image, pMemoryRequirements); |
| 5185 | } |
| 5186 | |
Aaron Ruby | a8140f4 | 2024-05-10 15:50:44 -0400 | [diff] [blame] | 5187 | void ResourceTracker::on_vkGetImageSubresourceLayout(void* context, VkDevice device, VkImage image, |
| 5188 | const VkImageSubresource* pSubresource, |
| 5189 | VkSubresourceLayout* pLayout) { |
| 5190 | VkEncoder* enc = (VkEncoder*)context; |
| 5191 | VkImage targetImage = image; |
| 5192 | #if defined(LINUX_GUEST_BUILD) |
| 5193 | auto it = info_VkImage.find(image); |
| 5194 | if (it == info_VkImage.end()) return; |
| 5195 | const auto& info = it->second; |
| 5196 | if (info.linearPeerImage) { |
| 5197 | targetImage = info.linearPeerImage; |
| 5198 | } |
| 5199 | #endif |
| 5200 | enc->vkGetImageSubresourceLayout(device, targetImage, pSubresource, pLayout, |
| 5201 | true /* do lock */); |
| 5202 | } |
| 5203 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5204 | VkResult ResourceTracker::on_vkBindImageMemory(void* context, VkResult, VkDevice device, |
| 5205 | VkImage image, VkDeviceMemory memory, |
| 5206 | VkDeviceSize memoryOffset) { |
| 5207 | VkEncoder* enc = (VkEncoder*)context; |
| 5208 | // Do not forward calls with invalid handles to host. |
| 5209 | if (info_VkDeviceMemory.find(memory) == info_VkDeviceMemory.end() || |
| 5210 | info_VkImage.find(image) == info_VkImage.end()) { |
| 5211 | return VK_ERROR_OUT_OF_DEVICE_MEMORY; |
| 5212 | } |
| 5213 | return enc->vkBindImageMemory(device, image, memory, memoryOffset, true /* do lock */); |
| 5214 | } |
| 5215 | |
| 5216 | VkResult ResourceTracker::on_vkBindImageMemory2(void* context, VkResult, VkDevice device, |
| 5217 | uint32_t bindingCount, |
| 5218 | const VkBindImageMemoryInfo* pBindInfos) { |
| 5219 | VkEncoder* enc = (VkEncoder*)context; |
| 5220 | |
| 5221 | if (bindingCount < 1 || !pBindInfos) { |
| 5222 | return VK_ERROR_OUT_OF_DEVICE_MEMORY; |
Yilong Li | e202d55 | 2020-02-06 07:38:16 -0800 | [diff] [blame] | 5223 | } |
| 5224 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5225 | for (uint32_t i = 0; i < bindingCount; i++) { |
| 5226 | const VkBindImageMemoryInfo& bimi = pBindInfos[i]; |
Lingfeng Yang | 87f1196 | 2019-06-27 16:28:53 +0000 | [diff] [blame] | 5227 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5228 | auto imageIt = info_VkImage.find(bimi.image); |
| 5229 | if (imageIt == info_VkImage.end()) { |
Yilong Li | e135221 | 2022-05-19 16:26:11 -0700 | [diff] [blame] | 5230 | return VK_ERROR_OUT_OF_DEVICE_MEMORY; |
| 5231 | } |
Jason Macnak | e0ac288 | 2023-06-07 09:13:51 -0700 | [diff] [blame] | 5232 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5233 | if (bimi.memory != VK_NULL_HANDLE) { |
| 5234 | auto memoryIt = info_VkDeviceMemory.find(bimi.memory); |
| 5235 | if (memoryIt == info_VkDeviceMemory.end()) { |
Jason Macnak | e0ac288 | 2023-06-07 09:13:51 -0700 | [diff] [blame] | 5236 | return VK_ERROR_OUT_OF_DEVICE_MEMORY; |
| 5237 | } |
Jason Macnak | e0ac288 | 2023-06-07 09:13:51 -0700 | [diff] [blame] | 5238 | } |
Lingfeng Yang | 4af5f32 | 2019-02-14 08:10:28 -0800 | [diff] [blame] | 5239 | } |
| 5240 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5241 | return enc->vkBindImageMemory2(device, bindingCount, pBindInfos, true /* do lock */); |
| 5242 | } |
| 5243 | |
| 5244 | VkResult ResourceTracker::on_vkBindImageMemory2KHR(void* context, VkResult result, VkDevice device, |
| 5245 | uint32_t bindingCount, |
| 5246 | const VkBindImageMemoryInfo* pBindInfos) { |
| 5247 | return on_vkBindImageMemory2(context, result, device, bindingCount, pBindInfos); |
| 5248 | } |
| 5249 | |
// Creates a buffer, preserving selected pNext structs in a local copy of the
// create info, handling Fuchsia sysmem-backed external memory, and recording
// guest-side tracking state (base requirements, external-memory flags).
VkResult ResourceTracker::on_vkCreateBuffer(void* context, VkResult, VkDevice device,
                                            const VkBufferCreateInfo* pCreateInfo,
                                            const VkAllocationCallbacks* pAllocator,
                                            VkBuffer* pBuffer) {
    VkEncoder* enc = (VkEncoder*)context;

    // Rebuild the pNext chain from scratch on a local copy, carrying over only
    // the structs we understand (external memory, capture/device address).
    VkBufferCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
    vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
    VkExternalMemoryBufferCreateInfo localExtBufCi;

    const VkExternalMemoryBufferCreateInfo* extBufCiPtr =
        vk_find_struct<VkExternalMemoryBufferCreateInfo>(pCreateInfo);
    if (extBufCiPtr) {
        localExtBufCi = vk_make_orphan_copy(*extBufCiPtr);
        vk_append_struct(&structChainIter, &localExtBufCi);
    }

    VkBufferOpaqueCaptureAddressCreateInfo localCapAddrCi;
    const VkBufferOpaqueCaptureAddressCreateInfo* pCapAddrCi =
        vk_find_struct<VkBufferOpaqueCaptureAddressCreateInfo>(pCreateInfo);
    if (pCapAddrCi) {
        localCapAddrCi = vk_make_orphan_copy(*pCapAddrCi);
        vk_append_struct(&structChainIter, &localCapAddrCi);
    }

    VkBufferDeviceAddressCreateInfoEXT localDevAddrCi;
    const VkBufferDeviceAddressCreateInfoEXT* pDevAddrCi =
        vk_find_struct<VkBufferDeviceAddressCreateInfoEXT>(pCreateInfo);
    if (pDevAddrCi) {
        localDevAddrCi = vk_make_orphan_copy(*pDevAddrCi);
        vk_append_struct(&structChainIter, &localDevAddrCi);
    }

#ifdef VK_USE_PLATFORM_FUCHSIA
    Optional<zx::vmo> vmo;
    bool isSysmemBackedMemory = false;

    if (extBufCiPtr &&
        (extBufCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA)) {
        isSysmemBackedMemory = true;
    }

    // If the buffer is tied to a sysmem buffer collection, fetch its VMO and
    // register it with the goldfish control device.
    const auto* extBufferCollectionPtr =
        vk_find_struct<VkBufferCollectionBufferCreateInfoFUCHSIA>(pCreateInfo);

    if (extBufferCollectionPtr) {
        const auto& collection =
            *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
                extBufferCollectionPtr->collection);
        uint32_t index = extBufferCollectionPtr->index;

        auto result = collection->WaitForBuffersAllocated();
        if (result.ok() && result->status == ZX_OK) {
            auto& info = result->buffer_collection_info;
            if (index < info.buffer_count) {
                vmo = gfxstream::guest::makeOptional(std::move(info.buffers[index].vmo));
            }
        } else {
            mesa_loge("WaitForBuffersAllocated failed: %d %d", result.status(),
                      GET_STATUS_SAFE(result, status));
        }

        if (vmo && vmo->is_valid()) {
            fidl::Arena arena;
            fuchsia_hardware_goldfish::wire::CreateBuffer2Params createParams(arena);
            createParams.set_size(arena, pCreateInfo->size)
                .set_memory_property(fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal);

            auto result = mControlDevice->CreateBuffer2(std::move(*vmo), createParams);
            // NOTE(review): `result->is_error() != ZX_OK` compares a bool to a
            // status code; it evaluates as plain is_error(), so the intent is
            // "failed for a reason other than ALREADY_EXISTS" — confirm.
            if (!result.ok() ||
                (result->is_error() != ZX_OK && result->error_value() != ZX_ERR_ALREADY_EXISTS)) {
                mesa_loge("CreateBuffer2 failed: %d:%d", result.status(),
                          GET_STATUS_SAFE(result, error_value()));
            }
            isSysmemBackedMemory = true;
        }
    }
#endif  // VK_USE_PLATFORM_FUCHSIA

    VkResult res;
    VkMemoryRequirements memReqs;

    // Prefer the combined create+requirements path when the host supports it,
    // saving a separate vkGetBufferMemoryRequirements round trip.
    if (supportsCreateResourcesWithRequirements()) {
        res = enc->vkCreateBufferWithRequirementsGOOGLE(device, &localCreateInfo, pAllocator,
                                                        pBuffer, &memReqs, true /* do lock */);
    } else {
        res =
            enc->vkCreateBuffer(device, &localCreateInfo, pAllocator, pBuffer, true /* do lock */);
    }

    if (res != VK_SUCCESS) return res;

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    // Lazily resolve the color-buffer memory index (0xFFFFFFFF is the
    // "unknown" sentinel), then widen the reported memory type bits for
    // AHardwareBuffer/dma-buf external buffers.
    if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
        mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
    }
    if (extBufCiPtr &&
        ((extBufCiPtr->handleTypes &
          VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) ||
         (extBufCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT))) {
        updateMemoryTypeBits(&memReqs.memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);
    }
#endif

    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkBuffer.find(*pBuffer);
    if (it == info_VkBuffer.end()) return VK_ERROR_INITIALIZATION_FAILED;

    auto& info = it->second;

    // Store the create info without its pNext chain (the chained locals above
    // are stack-allocated and would dangle).
    info.createInfo = localCreateInfo;
    info.createInfo.pNext = nullptr;

    if (supportsCreateResourcesWithRequirements()) {
        info.baseRequirementsKnown = true;
        info.baseRequirements = memReqs;
    }

    if (extBufCiPtr) {
        info.external = true;
        info.externalCreateInfo = *extBufCiPtr;
    }

#ifdef VK_USE_PLATFORM_FUCHSIA
    if (isSysmemBackedMemory) {
        info.isSysmemBackedMemory = true;
    }
#endif

    return res;
}
| 5382 | |
| 5383 | void ResourceTracker::on_vkDestroyBuffer(void* context, VkDevice device, VkBuffer buffer, |
| 5384 | const VkAllocationCallbacks* pAllocator) { |
| 5385 | VkEncoder* enc = (VkEncoder*)context; |
| 5386 | enc->vkDestroyBuffer(device, buffer, pAllocator, true /* do lock */); |
| 5387 | } |
| 5388 | |
// Returns cached memory requirements when available; otherwise queries the
// host once and caches the result on the buffer's tracking entry.
void ResourceTracker::on_vkGetBufferMemoryRequirements(void* context, VkDevice device,
                                                       VkBuffer buffer,
                                                       VkMemoryRequirements* pMemoryRequirements) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkBuffer.find(buffer);
    if (it == info_VkBuffer.end()) return;

    auto& info = it->second;

    // Fast path: requirements were recorded by a prior call (or at creation,
    // via vkCreateBufferWithRequirementsGOOGLE).
    if (info.baseRequirementsKnown) {
        *pMemoryRequirements = info.baseRequirements;
        return;
    }

    // Drop the lock around the host round trip so other threads are not
    // blocked behind the encoder call.
    lock.unlock();

    VkEncoder* enc = (VkEncoder*)context;
    enc->vkGetBufferMemoryRequirements(device, buffer, pMemoryRequirements, true /* do lock */);

    lock.lock();

    // NOTE(review): `info` is a reference obtained before the unlock; this
    // assumes the buffer entry is not erased concurrently — confirm.
    info.baseRequirementsKnown = true;
    info.baseRequirements = *pMemoryRequirements;
}
| 5414 | |
| 5415 | void ResourceTracker::on_vkGetBufferMemoryRequirements2( |
| 5416 | void* context, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, |
| 5417 | VkMemoryRequirements2* pMemoryRequirements) { |
| 5418 | VkEncoder* enc = (VkEncoder*)context; |
| 5419 | enc->vkGetBufferMemoryRequirements2(device, pInfo, pMemoryRequirements, true /* do lock */); |
| 5420 | transformBufferMemoryRequirements2ForGuest(pInfo->buffer, pMemoryRequirements); |
| 5421 | } |
| 5422 | |
| 5423 | void ResourceTracker::on_vkGetBufferMemoryRequirements2KHR( |
| 5424 | void* context, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, |
| 5425 | VkMemoryRequirements2* pMemoryRequirements) { |
| 5426 | VkEncoder* enc = (VkEncoder*)context; |
| 5427 | enc->vkGetBufferMemoryRequirements2KHR(device, pInfo, pMemoryRequirements, true /* do lock */); |
| 5428 | transformBufferMemoryRequirements2ForGuest(pInfo->buffer, pMemoryRequirements); |
| 5429 | } |
| 5430 | |
| 5431 | VkResult ResourceTracker::on_vkBindBufferMemory(void* context, VkResult, VkDevice device, |
| 5432 | VkBuffer buffer, VkDeviceMemory memory, |
| 5433 | VkDeviceSize memoryOffset) { |
| 5434 | VkEncoder* enc = (VkEncoder*)context; |
| 5435 | return enc->vkBindBufferMemory(device, buffer, memory, memoryOffset, true /* do lock */); |
| 5436 | } |
| 5437 | |
| 5438 | VkResult ResourceTracker::on_vkBindBufferMemory2(void* context, VkResult, VkDevice device, |
| 5439 | uint32_t bindInfoCount, |
| 5440 | const VkBindBufferMemoryInfo* pBindInfos) { |
| 5441 | VkEncoder* enc = (VkEncoder*)context; |
| 5442 | return enc->vkBindBufferMemory2(device, bindInfoCount, pBindInfos, true /* do lock */); |
| 5443 | } |
| 5444 | |
| 5445 | VkResult ResourceTracker::on_vkBindBufferMemory2KHR(void* context, VkResult, VkDevice device, |
| 5446 | uint32_t bindInfoCount, |
| 5447 | const VkBindBufferMemoryInfo* pBindInfos) { |
| 5448 | VkEncoder* enc = (VkEncoder*)context; |
| 5449 | return enc->vkBindBufferMemory2KHR(device, bindInfoCount, pBindInfos, true /* do lock */); |
| 5450 | } |
| 5451 | |
// Creates a semaphore, stripping export structs the host should not see and
// wiring up the platform-specific external handle (Fuchsia zircon event, or
// Android/Linux sync fd via virtio-gpu native sync or goldfish sync).
VkResult ResourceTracker::on_vkCreateSemaphore(void* context, VkResult input_result,
                                               VkDevice device,
                                               const VkSemaphoreCreateInfo* pCreateInfo,
                                               const VkAllocationCallbacks* pAllocator,
                                               VkSemaphore* pSemaphore) {
    (void)input_result;
    VkEncoder* enc = (VkEncoder*)context;

    VkSemaphoreCreateInfo finalCreateInfo = *pCreateInfo;

    const VkExportSemaphoreCreateInfoKHR* exportSemaphoreInfoPtr =
        vk_find_struct<VkExportSemaphoreCreateInfoKHR>(pCreateInfo);

#ifdef VK_USE_PLATFORM_FUCHSIA
    bool exportEvent =
        exportSemaphoreInfoPtr && (exportSemaphoreInfoPtr->handleTypes &
                                   VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA);

    if (exportEvent) {
        // Strip the export struct before forwarding to the host.
        finalCreateInfo.pNext = nullptr;
        // If we have timeline semaphores externally, leave it there.
        const VkSemaphoreTypeCreateInfo* typeCi =
            vk_find_struct<VkSemaphoreTypeCreateInfo>(pCreateInfo);
        if (typeCi) finalCreateInfo.pNext = typeCi;
    }
#endif

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    bool exportSyncFd = exportSemaphoreInfoPtr && (exportSemaphoreInfoPtr->handleTypes &
                                                   VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT);

    if (exportSyncFd) {
        // Strip the export struct before forwarding to the host.
        finalCreateInfo.pNext = nullptr;
        // If we have timeline semaphores externally, leave it there.
        const VkSemaphoreTypeCreateInfo* typeCi =
            vk_find_struct<VkSemaphoreTypeCreateInfo>(pCreateInfo);
        if (typeCi) finalCreateInfo.pNext = typeCi;
    }
#endif
    input_result = enc->vkCreateSemaphore(device, &finalCreateInfo, pAllocator, pSemaphore,
                                          true /* do lock */);

    zx_handle_t event_handle = ZX_HANDLE_INVALID;

#ifdef VK_USE_PLATFORM_FUCHSIA
    if (exportEvent) {
        zx_event_create(0, &event_handle);
    }
#endif

    AutoLock<RecursiveLock> lock(mLock);

    // NOTE(review): the host create result is not returned directly; a failed
    // create surfaces here as a missing tracking entry — confirm intended.
    auto it = info_VkSemaphore.find(*pSemaphore);
    if (it == info_VkSemaphore.end()) return VK_ERROR_INITIALIZATION_FAILED;

    auto& info = it->second;

    info.device = device;
    info.eventHandle = event_handle;
#ifdef VK_USE_PLATFORM_FUCHSIA
    info.eventKoid = getEventKoid(info.eventHandle);
#endif

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    if (exportSyncFd) {
        if (mFeatureInfo->hasVirtioGpuNativeSync) {
            // virtio-gpu native sync: create a fence bound to the host-side
            // semaphore handle; its OS handle becomes the exported sync fd.
            VkResult result;
            int64_t osHandle;
            uint64_t hostFenceHandle = get_host_u64_VkSemaphore(*pSemaphore);

            result = createFence(device, hostFenceHandle, osHandle);
            if (result != VK_SUCCESS) return result;

            info.syncFd.emplace(osHandle);
        } else {
#if GFXSTREAM_ENABLE_GUEST_GOLDFISH
            // Legacy goldfish sync device path.
            ensureSyncDeviceFd();

            if (exportSyncFd) {
                int syncFd = -1;
                goldfish_sync_queue_work(
                    mSyncDeviceFd, get_host_u64_VkSemaphore(*pSemaphore) /* the handle */,
                    GOLDFISH_SYNC_VULKAN_SEMAPHORE_SYNC /* thread handle (doubling as type field) */
                    ,
                    &syncFd);
                info.syncFd.emplace(syncFd);
            }
#endif
        }
    }
#endif

    return VK_SUCCESS;
}
| 5546 | |
| 5547 | void ResourceTracker::on_vkDestroySemaphore(void* context, VkDevice device, VkSemaphore semaphore, |
| 5548 | const VkAllocationCallbacks* pAllocator) { |
| 5549 | VkEncoder* enc = (VkEncoder*)context; |
| 5550 | enc->vkDestroySemaphore(device, semaphore, pAllocator, true /* do lock */); |
| 5551 | } |
| 5552 | |
| 5553 | // https://www.khronos.org/registry/vulkan/specs/1.0-extensions/html/vkspec.html#vkGetSemaphoreFdKHR |
| 5554 | // Each call to vkGetSemaphoreFdKHR must create a new file descriptor and transfer ownership |
| 5555 | // of it to the application. To avoid leaking resources, the application must release ownership |
| 5556 | // of the file descriptor when it is no longer needed. |
| 5557 | VkResult ResourceTracker::on_vkGetSemaphoreFdKHR(void* context, VkResult, VkDevice device, |
| 5558 | const VkSemaphoreGetFdInfoKHR* pGetFdInfo, |
| 5559 | int* pFd) { |
Gurchetan Singh | 6c5a79d | 2022-02-10 11:47:59 -0800 | [diff] [blame] | 5560 | #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__) |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5561 | VkEncoder* enc = (VkEncoder*)context; |
| 5562 | bool getSyncFd = pGetFdInfo->handleType & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT; |
Lingfeng Yang | 5c70112 | 2019-03-05 08:34:46 -0800 | [diff] [blame] | 5563 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5564 | if (getSyncFd) { |
Gurchetan Singh | 6c906de | 2021-10-21 17:09:00 -0700 | [diff] [blame] | 5565 | AutoLock<RecursiveLock> lock(mLock); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5566 | auto it = info_VkSemaphore.find(pGetFdInfo->semaphore); |
| 5567 | if (it == info_VkSemaphore.end()) return VK_ERROR_OUT_OF_HOST_MEMORY; |
| 5568 | auto& semInfo = it->second; |
| 5569 | // syncFd is supposed to have value. |
| 5570 | auto* syncHelper = |
| 5571 | ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper(); |
| 5572 | *pFd = syncHelper->dup(semInfo.syncFd.value_or(-1)); |
David Reveman | 5b7c584 | 2019-02-20 01:06:48 -0500 | [diff] [blame] | 5573 | return VK_SUCCESS; |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5574 | } else { |
| 5575 | // opaque fd |
| 5576 | int hostFd = 0; |
| 5577 | VkResult result = enc->vkGetSemaphoreFdKHR(device, pGetFdInfo, &hostFd, true /* do lock */); |
| 5578 | if (result != VK_SUCCESS) { |
Lingfeng Yang | f434441 | 2019-03-18 19:02:09 -0700 | [diff] [blame] | 5579 | return result; |
Yahan Zhou | a499e44 | 2019-02-26 16:35:01 -0800 | [diff] [blame] | 5580 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5581 | *pFd = memfd_create("vk_opaque_fd", 0); |
| 5582 | write(*pFd, &hostFd, sizeof(hostFd)); |
David Reveman | 5b7c584 | 2019-02-20 01:06:48 -0500 | [diff] [blame] | 5583 | return VK_SUCCESS; |
David Reveman | 2471022 | 2019-02-25 02:21:42 -0500 | [diff] [blame] | 5584 | } |
Jason Macnak | 650c0c0 | 2023-07-20 16:06:53 -0700 | [diff] [blame] | 5585 | #else |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5586 | (void)context; |
| 5587 | (void)device; |
| 5588 | (void)pGetFdInfo; |
| 5589 | (void)pFd; |
| 5590 | return VK_ERROR_INCOMPATIBLE_DRIVER; |
Jason Macnak | 650c0c0 | 2023-07-20 16:06:53 -0700 | [diff] [blame] | 5591 | #endif |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5592 | } |
Lingfeng Yang | 236abc9 | 2018-12-21 20:19:33 -0800 | [diff] [blame] | 5593 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5594 | VkResult ResourceTracker::on_vkImportSemaphoreFdKHR( |
| 5595 | void* context, VkResult input_result, VkDevice device, |
| 5596 | const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo) { |
| 5597 | #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__) |
| 5598 | VkEncoder* enc = (VkEncoder*)context; |
| 5599 | if (input_result != VK_SUCCESS) { |
Lingfeng Yang | 236abc9 | 2018-12-21 20:19:33 -0800 | [diff] [blame] | 5600 | return input_result; |
| 5601 | } |
| 5602 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5603 | auto* syncHelper = ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper(); |
| 5604 | |
| 5605 | if (pImportSemaphoreFdInfo->handleType & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) { |
| 5606 | VkImportSemaphoreFdInfoKHR tmpInfo = *pImportSemaphoreFdInfo; |
Lingfeng Yang | 05d5ea3 | 2019-03-23 00:12:39 -0700 | [diff] [blame] | 5607 | |
Gurchetan Singh | 6c906de | 2021-10-21 17:09:00 -0700 | [diff] [blame] | 5608 | AutoLock<RecursiveLock> lock(mLock); |
Lingfeng Yang | 05d5ea3 | 2019-03-23 00:12:39 -0700 | [diff] [blame] | 5609 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5610 | auto semaphoreIt = info_VkSemaphore.find(pImportSemaphoreFdInfo->semaphore); |
| 5611 | auto& info = semaphoreIt->second; |
| 5612 | |
| 5613 | if (info.syncFd.value_or(-1) >= 0) { |
| 5614 | syncHelper->close(info.syncFd.value()); |
Lingfeng Yang | 05d5ea3 | 2019-03-23 00:12:39 -0700 | [diff] [blame] | 5615 | } |
| 5616 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5617 | info.syncFd.emplace(pImportSemaphoreFdInfo->fd); |
Lingfeng Yang | 05d5ea3 | 2019-03-23 00:12:39 -0700 | [diff] [blame] | 5618 | |
| 5619 | return VK_SUCCESS; |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5620 | } else { |
| 5621 | int fd = pImportSemaphoreFdInfo->fd; |
| 5622 | int err = lseek(fd, 0, SEEK_SET); |
| 5623 | if (err == -1) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 5624 | mesa_loge("lseek fail on import semaphore"); |
Lingfeng Yang | 05d5ea3 | 2019-03-23 00:12:39 -0700 | [diff] [blame] | 5625 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5626 | int hostFd = 0; |
| 5627 | read(fd, &hostFd, sizeof(hostFd)); |
| 5628 | VkImportSemaphoreFdInfoKHR tmpInfo = *pImportSemaphoreFdInfo; |
| 5629 | tmpInfo.fd = hostFd; |
| 5630 | VkResult result = enc->vkImportSemaphoreFdKHR(device, &tmpInfo, true /* do lock */); |
| 5631 | syncHelper->close(fd); |
| 5632 | return result; |
Lingfeng Yang | 05d5ea3 | 2019-03-23 00:12:39 -0700 | [diff] [blame] | 5633 | } |
Gurchetan Singh | 6c5a79d | 2022-02-10 11:47:59 -0800 | [diff] [blame] | 5634 | #else |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5635 | (void)context; |
| 5636 | (void)input_result; |
| 5637 | (void)device; |
| 5638 | (void)pImportSemaphoreFdInfo; |
| 5639 | return VK_ERROR_INCOMPATIBLE_DRIVER; |
| 5640 | #endif |
| 5641 | } |
Yilong Li | d1c6919 | 2021-02-05 02:06:07 -0800 | [diff] [blame] | 5642 | |
Aaron Ruby | 16b349b | 2024-04-26 13:52:40 -0400 | [diff] [blame] | 5643 | VkResult ResourceTracker::on_vkGetMemoryFdPropertiesKHR( |
| 5644 | void* context, VkResult, VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, int fd, |
| 5645 | VkMemoryFdPropertiesKHR* pMemoryFdProperties) { |
| 5646 | #if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR) |
| 5647 | if (!(handleType & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 5648 | mesa_loge("%s: VK_KHR_external_memory_fd behavior not defined for handleType: 0x%x\n", |
| 5649 | __func__, handleType); |
Aaron Ruby | 16b349b | 2024-04-26 13:52:40 -0400 | [diff] [blame] | 5650 | return VK_ERROR_INVALID_EXTERNAL_HANDLE; |
| 5651 | } |
| 5652 | // Sanity-check device |
| 5653 | AutoLock<RecursiveLock> lock(mLock); |
| 5654 | auto deviceIt = info_VkDevice.find(device); |
| 5655 | if (deviceIt == info_VkDevice.end()) { |
| 5656 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
| 5657 | } |
| 5658 | // TODO: Verify FD valid ? |
| 5659 | (void)fd; |
| 5660 | |
| 5661 | if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) { |
| 5662 | mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device); |
| 5663 | } |
| 5664 | |
| 5665 | updateMemoryTypeBits(&pMemoryFdProperties->memoryTypeBits, |
| 5666 | mCaps.vulkanCapset.colorBufferMemoryIndex); |
| 5667 | |
| 5668 | return VK_SUCCESS; |
| 5669 | #else |
| 5670 | (void)context; |
| 5671 | (void)device; |
| 5672 | (void)handleType; |
| 5673 | (void)fd; |
| 5674 | (void)pMemoryFdProperties; |
| 5675 | return VK_ERROR_INCOMPATIBLE_DRIVER; |
| 5676 | #endif |
| 5677 | } |
| 5678 | |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 5679 | VkResult ResourceTracker::on_vkGetMemoryFdKHR(void* context, VkResult, VkDevice device, |
| 5680 | const VkMemoryGetFdInfoKHR* pGetFdInfo, int* pFd) { |
| 5681 | #if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR) |
| 5682 | if (!pGetFdInfo) return VK_ERROR_OUT_OF_HOST_MEMORY; |
| 5683 | if (!pGetFdInfo->memory) return VK_ERROR_OUT_OF_HOST_MEMORY; |
| 5684 | |
| 5685 | if (!(pGetFdInfo->handleType & (VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT | |
| 5686 | VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT))) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 5687 | mesa_loge("%s: Export operation not defined for handleType: 0x%x\n", __func__, |
| 5688 | pGetFdInfo->handleType); |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 5689 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
| 5690 | } |
| 5691 | // Sanity-check device |
| 5692 | AutoLock<RecursiveLock> lock(mLock); |
| 5693 | auto deviceIt = info_VkDevice.find(device); |
| 5694 | if (deviceIt == info_VkDevice.end()) { |
| 5695 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
| 5696 | } |
| 5697 | |
| 5698 | auto deviceMemIt = info_VkDeviceMemory.find(pGetFdInfo->memory); |
| 5699 | if (deviceMemIt == info_VkDeviceMemory.end()) { |
| 5700 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
| 5701 | } |
| 5702 | auto& info = deviceMemIt->second; |
| 5703 | |
| 5704 | if (!info.blobPtr) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 5705 | mesa_loge("%s: VkDeviceMemory does not have a resource available for export.\n", __func__); |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 5706 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
| 5707 | } |
| 5708 | |
| 5709 | VirtGpuExternalHandle handle{}; |
| 5710 | int ret = info.blobPtr->exportBlob(handle); |
| 5711 | if (ret != 0 || handle.osHandle < 0) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 5712 | mesa_loge("%s: Failed to export host resource to FD.\n", __func__); |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 5713 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
| 5714 | } |
| 5715 | *pFd = handle.osHandle; |
| 5716 | return VK_SUCCESS; |
| 5717 | #else |
| 5718 | (void)context; |
| 5719 | (void)device; |
| 5720 | (void)pGetFdInfo; |
| 5721 | (void)pFd; |
| 5722 | return VK_ERROR_INCOMPATIBLE_DRIVER; |
| 5723 | #endif |
| 5724 | } |
| 5725 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5726 | void ResourceTracker::flushCommandBufferPendingCommandsBottomUp( |
| 5727 | void* context, VkQueue queue, const std::vector<VkCommandBuffer>& workingSet) { |
| 5728 | if (workingSet.empty()) return; |
Jason Macnak | d86380c | 2020-09-03 11:02:21 -0700 | [diff] [blame] | 5729 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5730 | std::vector<VkCommandBuffer> nextLevel; |
| 5731 | for (auto commandBuffer : workingSet) { |
Lingfeng Yang | 34eec78 | 2020-09-24 18:22:44 -0700 | [diff] [blame] | 5732 | struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5733 | forAllObjects(cb->subObjects, [&nextLevel](void* secondary) { |
| 5734 | nextLevel.push_back((VkCommandBuffer)secondary); |
| 5735 | }); |
Lingfeng Yang | 39a276e | 2019-06-17 13:27:22 -0700 | [diff] [blame] | 5736 | } |
| 5737 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5738 | flushCommandBufferPendingCommandsBottomUp(context, queue, nextLevel); |
Lingfeng Yang | 256f925 | 2020-07-14 14:27:33 -0700 | [diff] [blame] | 5739 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5740 | // After this point, everyone at the previous level has been flushed |
| 5741 | for (auto cmdbuf : workingSet) { |
| 5742 | struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(cmdbuf); |
Lingfeng Yang | f89f75d | 2020-07-09 17:42:33 -0700 | [diff] [blame] | 5743 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5744 | // There's no pending commands here, skip. (case 1) |
| 5745 | if (!cb->privateStream) continue; |
Lingfeng Yang | 34eec78 | 2020-09-24 18:22:44 -0700 | [diff] [blame] | 5746 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5747 | unsigned char* writtenPtr = 0; |
| 5748 | size_t written = 0; |
| 5749 | CommandBufferStagingStream* cmdBufStream = |
| 5750 | static_cast<CommandBufferStagingStream*>(cb->privateStream); |
| 5751 | cmdBufStream->getWritten(&writtenPtr, &written); |
Lingfeng Yang | a1d57a5 | 2021-01-08 14:02:14 -0800 | [diff] [blame] | 5752 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5753 | // There's no pending commands here, skip. (case 2, stream created but no new recordings) |
| 5754 | if (!written) continue; |
Lingfeng Yang | a1d57a5 | 2021-01-08 14:02:14 -0800 | [diff] [blame] | 5755 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5756 | // There are pending commands to flush. |
| 5757 | VkEncoder* enc = (VkEncoder*)context; |
| 5758 | VkDeviceMemory deviceMemory = cmdBufStream->getDeviceMemory(); |
| 5759 | VkDeviceSize dataOffset = 0; |
Shalini S | db704c9 | 2023-01-27 21:35:33 +0000 | [diff] [blame] | 5760 | if (mFeatureInfo->hasVulkanAuxCommandMemory) { |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5761 | // for suballocations, deviceMemory is an alias VkDeviceMemory |
| 5762 | // get underling VkDeviceMemory for given alias |
| 5763 | deviceMemoryTransform_tohost(&deviceMemory, 1 /*memoryCount*/, &dataOffset, |
| 5764 | 1 /*offsetCount*/, nullptr /*size*/, 0 /*sizeCount*/, |
| 5765 | nullptr /*typeIndex*/, 0 /*typeIndexCount*/, |
| 5766 | nullptr /*typeBits*/, 0 /*typeBitCounts*/); |
| 5767 | |
| 5768 | // mark stream as flushing before flushing commands |
| 5769 | cmdBufStream->markFlushing(); |
| 5770 | enc->vkQueueFlushCommandsFromAuxMemoryGOOGLE(queue, cmdbuf, deviceMemory, dataOffset, |
| 5771 | written, true /*do lock*/); |
| 5772 | } else { |
| 5773 | enc->vkQueueFlushCommandsGOOGLE(queue, cmdbuf, written, (const void*)writtenPtr, |
| 5774 | true /* do lock */); |
| 5775 | } |
| 5776 | // Reset this stream. |
| 5777 | // flushing happens on vkQueueSubmit |
| 5778 | // vulkan api states that on queue submit, |
| 5779 | // applications MUST not attempt to modify the command buffer in any way |
| 5780 | // -as the device may be processing the commands recorded to it. |
| 5781 | // It is safe to call reset() here for this reason. |
| 5782 | // Command Buffer associated with this stream will only leave pending state |
| 5783 | // after queue submit is complete and host has read the data |
| 5784 | cmdBufStream->reset(); |
| 5785 | } |
| 5786 | } |
| 5787 | |
// When a queue is used from a different VkEncoder (i.e. a different guest
// thread/stream) than last time, performs a host-side handshake so that all
// commands already sent on the previous encoder are ordered before commands
// from the current one. Returns 0 in all cases.
uint32_t ResourceTracker::syncEncodersForQueue(VkQueue queue, VkEncoder* currentEncoder) {
    // Only needed when submits are asynchronous; synchronous submits are
    // already ordered by their round trips.
    if (!supportsAsyncQueueSubmit()) {
        return 0;
    }

    struct goldfish_VkQueue* q = as_goldfish_VkQueue(queue);
    if (!q) return 0;

    auto lastEncoder = q->lastUsedEncoder;

    // Same encoder as last time: nothing to synchronize.
    if (lastEncoder == currentEncoder) return 0;

    // Keep the current encoder alive while the queue references it.
    currentEncoder->incRef();

    q->lastUsedEncoder = currentEncoder;

    // First use of this queue: no predecessor to sync against.
    if (!lastEncoder) return 0;

    // Two-step handshake via consecutive sequence numbers: the old encoder
    // flushes everything up to oldSeq + 1, then the new encoder tells the host
    // to wait for that point before proceeding at oldSeq + 2.
    auto oldSeq = q->sequenceNumber;
    q->sequenceNumber += 2;
    lastEncoder->vkQueueHostSyncGOOGLE(queue, false, oldSeq + 1, true /* do lock */);
    lastEncoder->flush();
    currentEncoder->vkQueueHostSyncGOOGLE(queue, true, oldSeq + 2, true /* do lock */);

    // Drop the queue's reference on the previous encoder; if that was the last
    // reference, clear the stale pointer.
    if (lastEncoder->decRef()) {
        q->lastUsedEncoder = nullptr;
    }

    return 0;
}
| 5818 | |
| 5819 | template <class VkSubmitInfoType> |
| 5820 | void ResourceTracker::flushStagingStreams(void* context, VkQueue queue, uint32_t submitCount, |
| 5821 | const VkSubmitInfoType* pSubmits) { |
| 5822 | std::vector<VkCommandBuffer> toFlush; |
| 5823 | for (uint32_t i = 0; i < submitCount; ++i) { |
| 5824 | for (uint32_t j = 0; j < getCommandBufferCount(pSubmits[i]); ++j) { |
| 5825 | toFlush.push_back(getCommandBuffer(pSubmits[i], j)); |
| 5826 | } |
| 5827 | } |
| 5828 | |
| 5829 | std::unordered_set<VkDescriptorSet> pendingSets; |
| 5830 | collectAllPendingDescriptorSetsBottomUp(toFlush, pendingSets); |
| 5831 | commitDescriptorSetUpdates(context, queue, pendingSets); |
| 5832 | |
| 5833 | flushCommandBufferPendingCommandsBottomUp(context, queue, toFlush); |
| 5834 | |
| 5835 | for (auto cb : toFlush) { |
| 5836 | resetCommandBufferPendingTopology(cb); |
| 5837 | } |
| 5838 | } |
| 5839 | |
| 5840 | VkResult ResourceTracker::on_vkQueueSubmit(void* context, VkResult input_result, VkQueue queue, |
| 5841 | uint32_t submitCount, const VkSubmitInfo* pSubmits, |
| 5842 | VkFence fence) { |
| 5843 | AEMU_SCOPED_TRACE("on_vkQueueSubmit"); |
| 5844 | return on_vkQueueSubmitTemplate<VkSubmitInfo>(context, input_result, queue, submitCount, |
| 5845 | pSubmits, fence); |
| 5846 | } |
| 5847 | |
| 5848 | VkResult ResourceTracker::on_vkQueueSubmit2(void* context, VkResult input_result, VkQueue queue, |
| 5849 | uint32_t submitCount, const VkSubmitInfo2* pSubmits, |
| 5850 | VkFence fence) { |
| 5851 | AEMU_SCOPED_TRACE("on_vkQueueSubmit2"); |
| 5852 | return on_vkQueueSubmitTemplate<VkSubmitInfo2>(context, input_result, queue, submitCount, |
| 5853 | pSubmits, fence); |
| 5854 | } |
| 5855 | |
| 5856 | VkResult ResourceTracker::vkQueueSubmitEnc(VkEncoder* enc, VkQueue queue, uint32_t submitCount, |
| 5857 | const VkSubmitInfo* pSubmits, VkFence fence) { |
| 5858 | if (supportsAsyncQueueSubmit()) { |
| 5859 | enc->vkQueueSubmitAsyncGOOGLE(queue, submitCount, pSubmits, fence, true /* do lock */); |
| 5860 | return VK_SUCCESS; |
| 5861 | } else { |
| 5862 | return enc->vkQueueSubmit(queue, submitCount, pSubmits, fence, true /* do lock */); |
| 5863 | } |
| 5864 | } |
| 5865 | |
| 5866 | VkResult ResourceTracker::vkQueueSubmitEnc(VkEncoder* enc, VkQueue queue, uint32_t submitCount, |
| 5867 | const VkSubmitInfo2* pSubmits, VkFence fence) { |
| 5868 | if (supportsAsyncQueueSubmit()) { |
| 5869 | enc->vkQueueSubmitAsync2GOOGLE(queue, submitCount, pSubmits, fence, true /* do lock */); |
| 5870 | return VK_SUCCESS; |
| 5871 | } else { |
| 5872 | return enc->vkQueueSubmit2(queue, submitCount, pSubmits, fence, true /* do lock */); |
| 5873 | } |
| 5874 | } |
| 5875 | |
| 5876 | template <typename VkSubmitInfoType> |
| 5877 | VkResult ResourceTracker::on_vkQueueSubmitTemplate(void* context, VkResult input_result, |
| 5878 | VkQueue queue, uint32_t submitCount, |
| 5879 | const VkSubmitInfoType* pSubmits, |
| 5880 | VkFence fence) { |
| 5881 | flushStagingStreams(context, queue, submitCount, pSubmits); |
| 5882 | |
| 5883 | std::vector<VkSemaphore> pre_signal_semaphores; |
| 5884 | std::vector<zx_handle_t> pre_signal_events; |
| 5885 | std::vector<int> pre_signal_sync_fds; |
| 5886 | std::vector<std::pair<zx_handle_t, zx_koid_t>> post_wait_events; |
| 5887 | std::vector<int> post_wait_sync_fds; |
| 5888 | |
| 5889 | VkEncoder* enc = (VkEncoder*)context; |
| 5890 | |
| 5891 | AutoLock<RecursiveLock> lock(mLock); |
| 5892 | |
| 5893 | for (uint32_t i = 0; i < submitCount; ++i) { |
| 5894 | for (uint32_t j = 0; j < getWaitSemaphoreCount(pSubmits[i]); ++j) { |
| 5895 | VkSemaphore semaphore = getWaitSemaphore(pSubmits[i], j); |
| 5896 | auto it = info_VkSemaphore.find(semaphore); |
| 5897 | if (it != info_VkSemaphore.end()) { |
| 5898 | auto& semInfo = it->second; |
| 5899 | #ifdef VK_USE_PLATFORM_FUCHSIA |
| 5900 | if (semInfo.eventHandle) { |
| 5901 | pre_signal_events.push_back(semInfo.eventHandle); |
| 5902 | pre_signal_semaphores.push_back(semaphore); |
| 5903 | } |
| 5904 | #endif |
| 5905 | #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__) |
| 5906 | if (semInfo.syncFd.has_value()) { |
| 5907 | pre_signal_sync_fds.push_back(semInfo.syncFd.value()); |
| 5908 | pre_signal_semaphores.push_back(semaphore); |
| 5909 | } |
| 5910 | #endif |
| 5911 | } |
| 5912 | } |
| 5913 | for (uint32_t j = 0; j < getSignalSemaphoreCount(pSubmits[i]); ++j) { |
| 5914 | auto it = info_VkSemaphore.find(getSignalSemaphore(pSubmits[i], j)); |
| 5915 | if (it != info_VkSemaphore.end()) { |
| 5916 | auto& semInfo = it->second; |
| 5917 | #ifdef VK_USE_PLATFORM_FUCHSIA |
| 5918 | if (semInfo.eventHandle) { |
| 5919 | post_wait_events.push_back({semInfo.eventHandle, semInfo.eventKoid}); |
| 5920 | #ifndef FUCHSIA_NO_TRACE |
| 5921 | if (semInfo.eventKoid != ZX_KOID_INVALID) { |
Mitchell Kember | d9f3e4a | 2024-01-29 16:53:33 -0800 | [diff] [blame] | 5922 | // TODO(fxbug.dev/42144867): Remove the "semaphore" |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 5923 | // FLOW_END events once it is removed from clients |
| 5924 | // (for example, gfx Engine). |
| 5925 | TRACE_FLOW_END("gfx", "semaphore", semInfo.eventKoid); |
| 5926 | TRACE_FLOW_BEGIN("gfx", "goldfish_post_wait_event", semInfo.eventKoid); |
| 5927 | } |
| 5928 | #endif |
| 5929 | } |
| 5930 | #endif |
| 5931 | #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__) |
| 5932 | if (semInfo.syncFd.value_or(-1) >= 0) { |
| 5933 | post_wait_sync_fds.push_back(semInfo.syncFd.value()); |
| 5934 | } |
| 5935 | #endif |
| 5936 | } |
| 5937 | } |
| 5938 | } |
| 5939 | lock.unlock(); |
| 5940 | |
| 5941 | if (pre_signal_semaphores.empty()) { |
| 5942 | input_result = vkQueueSubmitEnc(enc, queue, submitCount, pSubmits, fence); |
| 5943 | if (input_result != VK_SUCCESS) return input_result; |
| 5944 | } else { |
| 5945 | // Schedule waits on the OS external objects and |
| 5946 | // signal the wait semaphores |
| 5947 | // in a separate thread. |
| 5948 | std::vector<WorkPool::Task> preSignalTasks; |
| 5949 | std::vector<WorkPool::Task> preSignalQueueSubmitTasks; |
| 5950 | ; |
| 5951 | #ifdef VK_USE_PLATFORM_FUCHSIA |
| 5952 | for (auto event : pre_signal_events) { |
| 5953 | preSignalTasks.push_back([event] { |
| 5954 | zx_object_wait_one(event, ZX_EVENT_SIGNALED, ZX_TIME_INFINITE, nullptr); |
| 5955 | }); |
| 5956 | } |
| 5957 | #endif |
| 5958 | #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__) |
| 5959 | for (auto fd : pre_signal_sync_fds) { |
| 5960 | // https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkImportSemaphoreFdInfoKHR.html |
| 5961 | // fd == -1 is treated as already signaled |
| 5962 | if (fd != -1) { |
| 5963 | preSignalTasks.push_back([fd] { |
| 5964 | auto* syncHelper = |
| 5965 | ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper(); |
| 5966 | syncHelper->wait(fd, 3000); |
| 5967 | }); |
| 5968 | } |
| 5969 | } |
| 5970 | #endif |
| 5971 | if (!preSignalTasks.empty()) { |
| 5972 | auto waitGroupHandle = mWorkPool.schedule(preSignalTasks); |
| 5973 | mWorkPool.waitAll(waitGroupHandle); |
| 5974 | } |
| 5975 | |
| 5976 | // Use the old version of VkSubmitInfo |
| 5977 | VkSubmitInfo submit_info = { |
| 5978 | .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, |
| 5979 | .waitSemaphoreCount = 0, |
| 5980 | .pWaitSemaphores = nullptr, |
| 5981 | .pWaitDstStageMask = nullptr, |
| 5982 | .signalSemaphoreCount = static_cast<uint32_t>(pre_signal_semaphores.size()), |
| 5983 | .pSignalSemaphores = pre_signal_semaphores.data()}; |
| 5984 | vkQueueSubmitEnc(enc, queue, 1, &submit_info, VK_NULL_HANDLE); |
| 5985 | input_result = vkQueueSubmitEnc(enc, queue, submitCount, pSubmits, fence); |
| 5986 | if (input_result != VK_SUCCESS) return input_result; |
| 5987 | } |
| 5988 | lock.lock(); |
| 5989 | int externalFenceFdToSignal = -1; |
| 5990 | |
| 5991 | #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__) |
| 5992 | if (fence != VK_NULL_HANDLE) { |
| 5993 | auto it = info_VkFence.find(fence); |
| 5994 | if (it != info_VkFence.end()) { |
| 5995 | const auto& info = it->second; |
| 5996 | if (info.syncFd >= 0) { |
| 5997 | externalFenceFdToSignal = info.syncFd; |
| 5998 | } |
| 5999 | } |
| 6000 | } |
| 6001 | #endif |
| 6002 | if (externalFenceFdToSignal >= 0 || !post_wait_events.empty() || !post_wait_sync_fds.empty()) { |
| 6003 | std::vector<WorkPool::Task> tasks; |
| 6004 | |
| 6005 | tasks.push_back([queue, externalFenceFdToSignal, post_wait_events /* copy of zx handles */, |
| 6006 | post_wait_sync_fds /* copy of sync fds */] { |
| 6007 | auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc(); |
| 6008 | auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn); |
| 6009 | auto waitIdleRes = vkEncoder->vkQueueWaitIdle(queue, true /* do lock */); |
| 6010 | #ifdef VK_USE_PLATFORM_FUCHSIA |
| 6011 | AEMU_SCOPED_TRACE("on_vkQueueSubmit::SignalSemaphores"); |
| 6012 | (void)externalFenceFdToSignal; |
| 6013 | for (auto& [event, koid] : post_wait_events) { |
| 6014 | #ifndef FUCHSIA_NO_TRACE |
| 6015 | if (koid != ZX_KOID_INVALID) { |
| 6016 | TRACE_FLOW_END("gfx", "goldfish_post_wait_event", koid); |
| 6017 | TRACE_FLOW_BEGIN("gfx", "event_signal", koid); |
| 6018 | } |
| 6019 | #endif |
| 6020 | zx_object_signal(event, 0, ZX_EVENT_SIGNALED); |
| 6021 | } |
| 6022 | #endif |
Gurchetan Singh | b7feebd | 2024-01-23 14:12:36 -0800 | [diff] [blame] | 6023 | #if GFXSTREAM_ENABLE_GUEST_GOLDFISH |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6024 | for (auto& fd : post_wait_sync_fds) { |
| 6025 | goldfish_sync_signal(fd); |
| 6026 | } |
| 6027 | |
| 6028 | if (externalFenceFdToSignal >= 0) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 6029 | mesa_logi("%s: external fence real signal: %d\n", __func__, externalFenceFdToSignal); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6030 | goldfish_sync_signal(externalFenceFdToSignal); |
| 6031 | } |
| 6032 | #endif |
| 6033 | }); |
| 6034 | auto queueAsyncWaitHandle = mWorkPool.schedule(tasks); |
| 6035 | auto& queueWorkItems = mQueueSensitiveWorkPoolItems[queue]; |
| 6036 | queueWorkItems.push_back(queueAsyncWaitHandle); |
| 6037 | } |
| 6038 | return VK_SUCCESS; |
| 6039 | } |
| 6040 | |
| 6041 | VkResult ResourceTracker::on_vkQueueWaitIdle(void* context, VkResult, VkQueue queue) { |
| 6042 | VkEncoder* enc = (VkEncoder*)context; |
| 6043 | |
| 6044 | AutoLock<RecursiveLock> lock(mLock); |
| 6045 | std::vector<WorkPool::WaitGroupHandle> toWait = mQueueSensitiveWorkPoolItems[queue]; |
| 6046 | mQueueSensitiveWorkPoolItems[queue].clear(); |
| 6047 | lock.unlock(); |
| 6048 | |
| 6049 | if (toWait.empty()) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 6050 | mesa_logi("%s: No queue-specific work pool items\n", __func__); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6051 | return enc->vkQueueWaitIdle(queue, true /* do lock */); |
| 6052 | } |
| 6053 | |
| 6054 | for (auto handle : toWait) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 6055 | mesa_logi("%s: waiting on work group item: %llu\n", __func__, (unsigned long long)handle); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6056 | mWorkPool.waitAll(handle); |
| 6057 | } |
| 6058 | |
| 6059 | // now done waiting, get the host's opinion |
| 6060 | return enc->vkQueueWaitIdle(queue, true /* do lock */); |
| 6061 | } |
| 6062 | |
| 6063 | #ifdef VK_USE_PLATFORM_ANDROID_KHR |
| 6064 | void ResourceTracker::unwrap_VkNativeBufferANDROID(const VkNativeBufferANDROID* inputNativeInfo, |
| 6065 | VkNativeBufferANDROID* outputNativeInfo) { |
| 6066 | if (!inputNativeInfo || !inputNativeInfo->handle) { |
| 6067 | return; |
| 6068 | } |
| 6069 | |
| 6070 | if (!outputNativeInfo || !outputNativeInfo) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 6071 | mesa_loge("FATAL: Local native buffer info not properly allocated!"); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6072 | abort(); |
| 6073 | } |
| 6074 | |
| 6075 | auto* gralloc = ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper(); |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 6076 | const native_handle_t* nativeHandle = (const native_handle_t*)inputNativeInfo->handle; |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 6077 | *(uint32_t*)(outputNativeInfo->handle) = gralloc->getHostHandle(nativeHandle); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6078 | } |
| 6079 | |
| 6080 | void ResourceTracker::unwrap_VkBindImageMemorySwapchainInfoKHR( |
| 6081 | const VkBindImageMemorySwapchainInfoKHR* inputBimsi, |
| 6082 | VkBindImageMemorySwapchainInfoKHR* outputBimsi) { |
| 6083 | if (!inputBimsi || !inputBimsi->swapchain) { |
| 6084 | return; |
| 6085 | } |
| 6086 | |
| 6087 | if (!outputBimsi || !outputBimsi->swapchain) { |
Gurchetan Singh | e7fc357 | 2023-12-06 18:19:57 -0800 | [diff] [blame] | 6088 | return; |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6089 | } |
| 6090 | |
| 6091 | // Android based swapchains are implemented by the Android framework's |
| 6092 | // libvulkan. The only exist within the guest and should not be sent to |
| 6093 | // the host. |
| 6094 | outputBimsi->swapchain = VK_NULL_HANDLE; |
| 6095 | } |
| 6096 | #endif |
| 6097 | |
| 6098 | void ResourceTracker::unwrap_vkCreateImage_pCreateInfo(const VkImageCreateInfo* pCreateInfo, |
| 6099 | VkImageCreateInfo* local_pCreateInfo) { |
| 6100 | #ifdef VK_USE_PLATFORM_ANDROID_KHR |
| 6101 | const VkNativeBufferANDROID* inputNativeInfo = |
| 6102 | vk_find_struct<VkNativeBufferANDROID>(pCreateInfo); |
| 6103 | |
| 6104 | VkNativeBufferANDROID* outputNativeInfo = const_cast<VkNativeBufferANDROID*>( |
| 6105 | vk_find_struct<VkNativeBufferANDROID>(local_pCreateInfo)); |
| 6106 | |
| 6107 | unwrap_VkNativeBufferANDROID(inputNativeInfo, outputNativeInfo); |
| 6108 | #endif |
| 6109 | } |
| 6110 | |
// Consumes the native fence fd passed to vkAcquireImageANDROID: waits on it
// (bounded at 3000 ms) and then closes it. |fd_out| is unused; the fd is not
// forwarded to the host.
void ResourceTracker::unwrap_vkAcquireImageANDROID_nativeFenceFd(int fd, int* fd_out) {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    (void)fd_out;
    // Per the external-fence conventions, fd == -1 means "already signaled".
    if (fd != -1) {
        AEMU_SCOPED_TRACE("waitNativeFenceInAcquire");
        // Implicit Synchronization
        auto* syncHelper =
            ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
        syncHelper->wait(fd, 3000);
        // From libvulkan's swapchain.cpp:
        // """
        // NOTE: we're relying on AcquireImageANDROID to close fence_clone,
        // even if the call fails. We could close it ourselves on failure, but
        // that would create a race condition if the driver closes it on a
        // failure path: some other thread might create an fd with the same
        // number between the time the driver closes it and the time we close
        // it. We must assume one of: the driver *always* closes it even on
        // failure, or *never* closes it on failure.
        // """
        // Therefore, assume contract where we need to close fd in this driver
        syncHelper->close(fd);
    }
#endif
}
| 6135 | |
| 6136 | void ResourceTracker::unwrap_VkBindImageMemory2_pBindInfos( |
| 6137 | uint32_t bindInfoCount, const VkBindImageMemoryInfo* inputBindInfos, |
| 6138 | VkBindImageMemoryInfo* outputBindInfos) { |
| 6139 | #ifdef VK_USE_PLATFORM_ANDROID_KHR |
| 6140 | for (uint32_t i = 0; i < bindInfoCount; ++i) { |
| 6141 | const VkBindImageMemoryInfo* inputBindInfo = &inputBindInfos[i]; |
| 6142 | VkBindImageMemoryInfo* outputBindInfo = &outputBindInfos[i]; |
| 6143 | |
| 6144 | const VkNativeBufferANDROID* inputNativeInfo = |
| 6145 | vk_find_struct<VkNativeBufferANDROID>(inputBindInfo); |
| 6146 | |
| 6147 | VkNativeBufferANDROID* outputNativeInfo = const_cast<VkNativeBufferANDROID*>( |
| 6148 | vk_find_struct<VkNativeBufferANDROID>(outputBindInfo)); |
| 6149 | |
| 6150 | unwrap_VkNativeBufferANDROID(inputNativeInfo, outputNativeInfo); |
| 6151 | |
| 6152 | const VkBindImageMemorySwapchainInfoKHR* inputBimsi = |
| 6153 | vk_find_struct<VkBindImageMemorySwapchainInfoKHR>(inputBindInfo); |
| 6154 | |
| 6155 | VkBindImageMemorySwapchainInfoKHR* outputBimsi = |
| 6156 | const_cast<VkBindImageMemorySwapchainInfoKHR*>( |
| 6157 | vk_find_struct<VkBindImageMemorySwapchainInfoKHR>(outputBindInfo)); |
| 6158 | |
| 6159 | unwrap_VkBindImageMemorySwapchainInfoKHR(inputBimsi, outputBimsi); |
| 6160 | } |
| 6161 | #endif |
| 6162 | } |
| 6163 | |
| 6164 | // Action of vkMapMemoryIntoAddressSpaceGOOGLE: |
| 6165 | // 1. preprocess (on_vkMapMemoryIntoAddressSpaceGOOGLE_pre): |
| 6166 | // uses address space device to reserve the right size of |
| 6167 | // memory. |
| 6168 | // 2. the reservation results in a physical address. the physical |
| 6169 | // address is set as |*pAddress|. |
| 6170 | // 3. after pre, the API call is encoded to the host, where the |
| 6171 | // value of pAddress is also sent (the physical address). |
| 6172 | // 4. the host will obtain the actual gpu pointer and send it |
| 6173 | // back out in |*pAddress|. |
| 6174 | // 5. postprocess (on_vkMapMemoryIntoAddressSpaceGOOGLE) will run, |
| 6175 | // using the mmap() method of GoldfishAddressSpaceBlock to obtain |
| 6176 | // a pointer in guest userspace corresponding to the host pointer. |
| 6177 | VkResult ResourceTracker::on_vkMapMemoryIntoAddressSpaceGOOGLE_pre(void*, VkResult, VkDevice, |
| 6178 | VkDeviceMemory memory, |
| 6179 | uint64_t* pAddress) { |
| 6180 | AutoLock<RecursiveLock> lock(mLock); |
| 6181 | |
| 6182 | auto it = info_VkDeviceMemory.find(memory); |
| 6183 | if (it == info_VkDeviceMemory.end()) { |
| 6184 | return VK_ERROR_OUT_OF_HOST_MEMORY; |
| 6185 | } |
| 6186 | |
| 6187 | #if defined(__ANDROID__) |
| 6188 | auto& memInfo = it->second; |
| 6189 | |
| 6190 | GoldfishAddressSpaceBlockPtr block = std::make_shared<GoldfishAddressSpaceBlock>(); |
| 6191 | block->allocate(mGoldfishAddressSpaceBlockProvider.get(), memInfo.coherentMemorySize); |
| 6192 | |
| 6193 | memInfo.goldfishBlock = block; |
| 6194 | *pAddress = block->physAddr(); |
| 6195 | |
| 6196 | return VK_SUCCESS; |
| 6197 | #else |
| 6198 | (void)pAddress; |
| 6199 | return VK_ERROR_MEMORY_MAP_FAILED; |
| 6200 | #endif |
| 6201 | } |
| 6202 | |
| 6203 | VkResult ResourceTracker::on_vkMapMemoryIntoAddressSpaceGOOGLE(void*, VkResult input_result, |
| 6204 | VkDevice, VkDeviceMemory memory, |
| 6205 | uint64_t* pAddress) { |
| 6206 | (void)memory; |
| 6207 | (void)pAddress; |
| 6208 | |
| 6209 | if (input_result != VK_SUCCESS) { |
| 6210 | return input_result; |
| 6211 | } |
| 6212 | |
| 6213 | return input_result; |
| 6214 | } |
| 6215 | |
// Builds the guest-side shadow storage for a descriptor update template.
// Walks the template's update entries in two passes:
//   pass 1 counts how many image infos / buffer infos / buffer views /
//          inline-uniform-block bytes the template covers;
//   pass 2 allocates flat arrays of exactly those sizes and records, for
//          each descriptor slot, the index of the template entry owning it.
// These arrays are later filled from the user's raw update buffer in
// on_vkUpdateDescriptorSetWithTemplate and shipped to the host in one call.
// Returns VK_ERROR_INITIALIZATION_FAILED if the template is unknown.
VkResult ResourceTracker::initDescriptorUpdateTemplateBuffers(
    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
    VkDescriptorUpdateTemplate descriptorUpdateTemplate) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkDescriptorUpdateTemplate.find(descriptorUpdateTemplate);
    if (it == info_VkDescriptorUpdateTemplate.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto& info = it->second;
    uint32_t inlineUniformBlockBufferSize = 0;

    // Pass 1: count descriptors per category.
    for (uint32_t i = 0; i < pCreateInfo->descriptorUpdateEntryCount; ++i) {
        const auto& entry = pCreateInfo->pDescriptorUpdateEntries[i];
        uint32_t descCount = entry.descriptorCount;
        VkDescriptorType descType = entry.descriptorType;
        ++info.templateEntryCount;
        if (isDescriptorTypeInlineUniformBlock(descType)) {
            // For inline uniform blocks, descriptorCount acts as a byte size
            // here (it is accumulated into a byte-buffer size and later used
            // as a memcpy length in the template-update path).
            inlineUniformBlockBufferSize += descCount;
            ++info.inlineUniformBlockCount;
        } else {
            for (uint32_t j = 0; j < descCount; ++j) {
                if (isDescriptorTypeImageInfo(descType)) {
                    ++info.imageInfoCount;
                } else if (isDescriptorTypeBufferInfo(descType)) {
                    ++info.bufferInfoCount;
                } else if (isDescriptorTypeBufferView(descType)) {
                    ++info.bufferViewCount;
                } else {
                    // Deliberately non-fatal: log and skip unknown types.
                    mesa_loge("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
                    // abort();
                }
            }
        }
    }

    // Allocate exactly-sized arrays for the counts gathered above.
    // NOTE(review): raw new[] — presumably released when the template info is
    // torn down elsewhere in this file; confirm before changing ownership.
    if (info.templateEntryCount)
        info.templateEntries = new VkDescriptorUpdateTemplateEntry[info.templateEntryCount];

    if (info.imageInfoCount) {
        info.imageInfoIndices = new uint32_t[info.imageInfoCount];
        info.imageInfos = new VkDescriptorImageInfo[info.imageInfoCount];
    }

    if (info.bufferInfoCount) {
        info.bufferInfoIndices = new uint32_t[info.bufferInfoCount];
        info.bufferInfos = new VkDescriptorBufferInfo[info.bufferInfoCount];
    }

    if (info.bufferViewCount) {
        info.bufferViewIndices = new uint32_t[info.bufferViewCount];
        info.bufferViews = new VkBufferView[info.bufferViewCount];
    }

    if (info.inlineUniformBlockCount) {
        info.inlineUniformBlockBuffer.resize(inlineUniformBlockBufferSize);
        info.inlineUniformBlockBytesPerBlocks.resize(info.inlineUniformBlockCount);
    }

    // Running write positions into the per-category index arrays.
    uint32_t imageInfoIndex = 0;
    uint32_t bufferInfoIndex = 0;
    uint32_t bufferViewIndex = 0;
    uint32_t inlineUniformBlockIndex = 0;

    // Pass 2: copy each template entry and record which entry index (i)
    // every individual descriptor slot belongs to.
    for (uint32_t i = 0; i < pCreateInfo->descriptorUpdateEntryCount; ++i) {
        const auto& entry = pCreateInfo->pDescriptorUpdateEntries[i];
        uint32_t descCount = entry.descriptorCount;
        VkDescriptorType descType = entry.descriptorType;

        info.templateEntries[i] = entry;

        if (isDescriptorTypeInlineUniformBlock(descType)) {
            // Remember this block's byte size (descriptorCount) for later
            // decoding of the user's raw data buffer.
            info.inlineUniformBlockBytesPerBlocks[inlineUniformBlockIndex] = descCount;
            ++inlineUniformBlockIndex;
        } else {
            for (uint32_t j = 0; j < descCount; ++j) {
                if (isDescriptorTypeImageInfo(descType)) {
                    info.imageInfoIndices[imageInfoIndex] = i;
                    ++imageInfoIndex;
                } else if (isDescriptorTypeBufferInfo(descType)) {
                    info.bufferInfoIndices[bufferInfoIndex] = i;
                    ++bufferInfoIndex;
                } else if (isDescriptorTypeBufferView(descType)) {
                    info.bufferViewIndices[bufferViewIndex] = i;
                    ++bufferViewIndex;
                } else {
                    // Must mirror pass 1's skip behavior so counts line up.
                    mesa_loge("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
                    // abort();
                }
            }
        }
    }

    return VK_SUCCESS;
}
| 6312 | |
| 6313 | VkResult ResourceTracker::on_vkCreateDescriptorUpdateTemplate( |
| 6314 | void* context, VkResult input_result, VkDevice device, |
| 6315 | const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, |
| 6316 | const VkAllocationCallbacks* pAllocator, |
| 6317 | VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate) { |
| 6318 | (void)context; |
| 6319 | (void)device; |
| 6320 | (void)pAllocator; |
| 6321 | |
| 6322 | if (input_result != VK_SUCCESS) return input_result; |
| 6323 | |
| 6324 | return initDescriptorUpdateTemplateBuffers(pCreateInfo, *pDescriptorUpdateTemplate); |
| 6325 | } |
| 6326 | |
| 6327 | VkResult ResourceTracker::on_vkCreateDescriptorUpdateTemplateKHR( |
| 6328 | void* context, VkResult input_result, VkDevice device, |
| 6329 | const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, |
| 6330 | const VkAllocationCallbacks* pAllocator, |
| 6331 | VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate) { |
| 6332 | (void)context; |
| 6333 | (void)device; |
| 6334 | (void)pAllocator; |
| 6335 | |
| 6336 | if (input_result != VK_SUCCESS) return input_result; |
| 6337 | |
| 6338 | return initDescriptorUpdateTemplateBuffers(pCreateInfo, *pDescriptorUpdateTemplate); |
| 6339 | } |
| 6340 | |
// Applies a descriptor-update-template write to |descriptorSet|.
// Decodes the user's raw |pData| buffer using each template entry's
// offset/stride into the flat shadow arrays prepared by
// initDescriptorUpdateTemplateBuffers. Depending on the feature flag:
//   - batched mode: writes are recorded on the guest-side reified
//     descriptor set (flushed to the host later);
//   - otherwise: everything is sent to the host in one
//     vkUpdateDescriptorSetWithTemplateSized2GOOGLE call.
void ResourceTracker::on_vkUpdateDescriptorSetWithTemplate(
    void* context, VkDevice device, VkDescriptorSet descriptorSet,
    VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData) {
    VkEncoder* enc = (VkEncoder*)context;

    uint8_t* userBuffer = (uint8_t*)pData;
    if (!userBuffer) return;

    // TODO: Make this thread safe
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkDescriptorUpdateTemplate.find(descriptorUpdateTemplate);
    if (it == info_VkDescriptorUpdateTemplate.end()) {
        // Unknown template: silently ignore the update.
        return;
    }

    auto& info = it->second;

    // Snapshot raw pointers/counts out of the template info so the lock can
    // be dropped for the (potentially long) decode loop below.
    uint32_t templateEntryCount = info.templateEntryCount;
    VkDescriptorUpdateTemplateEntry* templateEntries = info.templateEntries;

    uint32_t imageInfoCount = info.imageInfoCount;
    uint32_t bufferInfoCount = info.bufferInfoCount;
    uint32_t bufferViewCount = info.bufferViewCount;
    // NOTE(review): this local is currently unused below.
    uint32_t inlineUniformBlockCount = info.inlineUniformBlockCount;
    uint32_t* imageInfoIndices = info.imageInfoIndices;
    uint32_t* bufferInfoIndices = info.bufferInfoIndices;
    uint32_t* bufferViewIndices = info.bufferViewIndices;
    VkDescriptorImageInfo* imageInfos = info.imageInfos;
    VkDescriptorBufferInfo* bufferInfos = info.bufferInfos;
    VkBufferView* bufferViews = info.bufferViews;
    uint8_t* inlineUniformBlockBuffer = info.inlineUniformBlockBuffer.data();
    uint32_t* inlineUniformBlockBytesPerBlocks = info.inlineUniformBlockBytesPerBlocks.data();

    // NOTE(review): |info| and the arrays above are used after the lock is
    // dropped — relies on the template not being destroyed concurrently.
    lock.unlock();

    // Running byte offsets into each shadow array as entries are decoded.
    size_t currImageInfoOffset = 0;
    size_t currBufferInfoOffset = 0;
    size_t currBufferViewOffset = 0;
    size_t inlineUniformBlockOffset = 0;
    size_t inlineUniformBlockIdx = 0;

    struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(descriptorSet);
    ReifiedDescriptorSet* reified = ds->reified;

    bool batched = mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate;

    for (uint32_t i = 0; i < templateEntryCount; ++i) {
        const auto& entry = templateEntries[i];
        VkDescriptorType descType = entry.descriptorType;
        uint32_t dstBinding = entry.dstBinding;

        auto offset = entry.offset;
        auto stride = entry.stride;
        auto dstArrayElement = entry.dstArrayElement;

        uint32_t descCount = entry.descriptorCount;

        if (isDescriptorTypeImageInfo(descType)) {
            // Per the Vulkan spec, stride 0 means tightly packed elements.
            if (!stride) stride = sizeof(VkDescriptorImageInfo);

            const VkDescriptorImageInfo* currImageInfoBegin =
                (const VkDescriptorImageInfo*)((uint8_t*)imageInfos + currImageInfoOffset);

            for (uint32_t j = 0; j < descCount; ++j) {
                const VkDescriptorImageInfo* user =
                    (const VkDescriptorImageInfo*)(userBuffer + offset + j * stride);

                memcpy(((uint8_t*)imageInfos) + currImageInfoOffset, user,
                       sizeof(VkDescriptorImageInfo));
                currImageInfoOffset += sizeof(VkDescriptorImageInfo);
            }

            if (batched) {
                doEmulatedDescriptorImageInfoWriteFromTemplate(
                    descType, dstBinding, dstArrayElement, descCount, currImageInfoBegin, reified);
            }
        } else if (isDescriptorTypeBufferInfo(descType)) {
            if (!stride) stride = sizeof(VkDescriptorBufferInfo);

            const VkDescriptorBufferInfo* currBufferInfoBegin =
                (const VkDescriptorBufferInfo*)((uint8_t*)bufferInfos + currBufferInfoOffset);

            for (uint32_t j = 0; j < descCount; ++j) {
                const VkDescriptorBufferInfo* user =
                    (const VkDescriptorBufferInfo*)(userBuffer + offset + j * stride);

                memcpy(((uint8_t*)bufferInfos) + currBufferInfoOffset, user,
                       sizeof(VkDescriptorBufferInfo));
#if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
                // Convert mesa to internal for objects in the user buffer
                VkDescriptorBufferInfo* internalBufferInfo =
                    (VkDescriptorBufferInfo*)(((uint8_t*)bufferInfos) + currBufferInfoOffset);
                VK_FROM_HANDLE(gfxstream_vk_buffer, gfxstream_buffer, internalBufferInfo->buffer);
                internalBufferInfo->buffer = gfxstream_buffer->internal_object;
#endif
                currBufferInfoOffset += sizeof(VkDescriptorBufferInfo);
            }

            if (batched) {
                doEmulatedDescriptorBufferInfoWriteFromTemplate(
                    descType, dstBinding, dstArrayElement, descCount, currBufferInfoBegin, reified);
            }

        } else if (isDescriptorTypeBufferView(descType)) {
            if (!stride) stride = sizeof(VkBufferView);

            const VkBufferView* currBufferViewBegin =
                (const VkBufferView*)((uint8_t*)bufferViews + currBufferViewOffset);

            for (uint32_t j = 0; j < descCount; ++j) {
                const VkBufferView* user = (const VkBufferView*)(userBuffer + offset + j * stride);

                memcpy(((uint8_t*)bufferViews) + currBufferViewOffset, user, sizeof(VkBufferView));
                currBufferViewOffset += sizeof(VkBufferView);
            }

            if (batched) {
                doEmulatedDescriptorBufferViewWriteFromTemplate(
                    descType, dstBinding, dstArrayElement, descCount, currBufferViewBegin, reified);
            }
        } else if (isDescriptorTypeInlineUniformBlock(descType)) {
            // Inline uniform blocks copy raw bytes; the per-block byte size
            // was recorded at template creation time.
            uint32_t inlineUniformBlockBytesPerBlock =
                inlineUniformBlockBytesPerBlocks[inlineUniformBlockIdx];
            uint8_t* currInlineUniformBlockBufferBegin =
                inlineUniformBlockBuffer + inlineUniformBlockOffset;
            memcpy(currInlineUniformBlockBufferBegin, userBuffer + offset,
                   inlineUniformBlockBytesPerBlock);
            inlineUniformBlockIdx++;
            inlineUniformBlockOffset += inlineUniformBlockBytesPerBlock;

            if (batched) {
                doEmulatedDescriptorInlineUniformBlockFromTemplate(
                    descType, dstBinding, dstArrayElement, descCount,
                    currInlineUniformBlockBufferBegin, reified);
            }
        } else {
            mesa_loge("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
            abort();
        }
    }

    // Batched mode: writes stay guest-side; nothing to send now.
    if (batched) return;

    enc->vkUpdateDescriptorSetWithTemplateSized2GOOGLE(
        device, descriptorSet, descriptorUpdateTemplate, imageInfoCount, bufferInfoCount,
        bufferViewCount, static_cast<uint32_t>(info.inlineUniformBlockBuffer.size()),
        imageInfoIndices, bufferInfoIndices, bufferViewIndices, imageInfos, bufferInfos,
        bufferViews, inlineUniformBlockBuffer, true /* do lock */);
}
| 6491 | |
Jean-Francois Thibert | f884afd | 2024-01-31 10:15:04 -0500 | [diff] [blame] | 6492 | void ResourceTracker::on_vkUpdateDescriptorSetWithTemplateKHR( |
| 6493 | void* context, VkDevice device, VkDescriptorSet descriptorSet, |
| 6494 | VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData) { |
| 6495 | on_vkUpdateDescriptorSetWithTemplate(context, device, descriptorSet, descriptorUpdateTemplate, |
| 6496 | pData); |
| 6497 | } |
| 6498 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6499 | VkResult ResourceTracker::on_vkGetPhysicalDeviceImageFormatProperties2_common( |
| 6500 | bool isKhr, void* context, VkResult input_result, VkPhysicalDevice physicalDevice, |
| 6501 | const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, |
| 6502 | VkImageFormatProperties2* pImageFormatProperties) { |
| 6503 | VkEncoder* enc = (VkEncoder*)context; |
| 6504 | (void)input_result; |
| 6505 | |
Andrew Woloszyn | 416d0a1 | 2023-10-04 17:02:19 -0400 | [diff] [blame] | 6506 | uint32_t supportedHandleType = 0; |
| 6507 | VkExternalImageFormatProperties* ext_img_properties = |
| 6508 | vk_find_struct<VkExternalImageFormatProperties>(pImageFormatProperties); |
| 6509 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6510 | #ifdef VK_USE_PLATFORM_FUCHSIA |
| 6511 | |
| 6512 | constexpr VkFormat kExternalImageSupportedFormats[] = { |
| 6513 | VK_FORMAT_B8G8R8A8_SINT, VK_FORMAT_B8G8R8A8_UNORM, VK_FORMAT_B8G8R8A8_SRGB, |
| 6514 | VK_FORMAT_B8G8R8A8_SNORM, VK_FORMAT_B8G8R8A8_SSCALED, VK_FORMAT_B8G8R8A8_USCALED, |
| 6515 | VK_FORMAT_R8G8B8A8_SINT, VK_FORMAT_R8G8B8A8_UNORM, VK_FORMAT_R8G8B8A8_SRGB, |
| 6516 | VK_FORMAT_R8G8B8A8_SNORM, VK_FORMAT_R8G8B8A8_SSCALED, VK_FORMAT_R8G8B8A8_USCALED, |
| 6517 | VK_FORMAT_R8_UNORM, VK_FORMAT_R8_UINT, VK_FORMAT_R8_USCALED, |
| 6518 | VK_FORMAT_R8_SNORM, VK_FORMAT_R8_SINT, VK_FORMAT_R8_SSCALED, |
| 6519 | VK_FORMAT_R8_SRGB, VK_FORMAT_R8G8_UNORM, VK_FORMAT_R8G8_UINT, |
| 6520 | VK_FORMAT_R8G8_USCALED, VK_FORMAT_R8G8_SNORM, VK_FORMAT_R8G8_SINT, |
| 6521 | VK_FORMAT_R8G8_SSCALED, VK_FORMAT_R8G8_SRGB, |
| 6522 | }; |
| 6523 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6524 | if (ext_img_properties) { |
| 6525 | if (std::find(std::begin(kExternalImageSupportedFormats), |
| 6526 | std::end(kExternalImageSupportedFormats), |
| 6527 | pImageFormatInfo->format) == std::end(kExternalImageSupportedFormats)) { |
| 6528 | return VK_ERROR_FORMAT_NOT_SUPPORTED; |
| 6529 | } |
| 6530 | } |
C Stout | 5a3a422 | 2023-11-14 16:31:56 -0800 | [diff] [blame] | 6531 | supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA; |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6532 | #endif |
| 6533 | |
| 6534 | #ifdef VK_USE_PLATFORM_ANDROID_KHR |
| 6535 | VkAndroidHardwareBufferUsageANDROID* output_ahw_usage = |
| 6536 | vk_find_struct<VkAndroidHardwareBufferUsageANDROID>(pImageFormatProperties); |
Andrew Woloszyn | 416d0a1 | 2023-10-04 17:02:19 -0400 | [diff] [blame] | 6537 | supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT | |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 6538 | VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID; |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6539 | #endif |
Andrew Woloszyn | 416d0a1 | 2023-10-04 17:02:19 -0400 | [diff] [blame] | 6540 | const VkPhysicalDeviceExternalImageFormatInfo* ext_img_info = |
| 6541 | vk_find_struct<VkPhysicalDeviceExternalImageFormatInfo>(pImageFormatInfo); |
| 6542 | if (supportedHandleType && ext_img_info) { |
| 6543 | // 0 is a valid handleType so we don't check against 0 |
| 6544 | if (ext_img_info->handleType != (ext_img_info->handleType & supportedHandleType)) { |
| 6545 | return VK_ERROR_FORMAT_NOT_SUPPORTED; |
| 6546 | } |
| 6547 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6548 | |
| 6549 | VkResult hostRes; |
| 6550 | |
| 6551 | if (isKhr) { |
| 6552 | hostRes = enc->vkGetPhysicalDeviceImageFormatProperties2KHR( |
| 6553 | physicalDevice, pImageFormatInfo, pImageFormatProperties, true /* do lock */); |
| 6554 | } else { |
| 6555 | hostRes = enc->vkGetPhysicalDeviceImageFormatProperties2( |
| 6556 | physicalDevice, pImageFormatInfo, pImageFormatProperties, true /* do lock */); |
| 6557 | } |
| 6558 | |
| 6559 | if (hostRes != VK_SUCCESS) return hostRes; |
| 6560 | |
| 6561 | #ifdef VK_USE_PLATFORM_FUCHSIA |
| 6562 | if (ext_img_properties) { |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6563 | if (ext_img_info) { |
| 6564 | if (static_cast<uint32_t>(ext_img_info->handleType) == |
| 6565 | VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA) { |
| 6566 | ext_img_properties->externalMemoryProperties = { |
| 6567 | .externalMemoryFeatures = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT | |
| 6568 | VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT, |
| 6569 | .exportFromImportedHandleTypes = |
| 6570 | VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA, |
| 6571 | .compatibleHandleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA, |
| 6572 | }; |
| 6573 | } |
| 6574 | } |
| 6575 | } |
| 6576 | #endif |
| 6577 | |
| 6578 | #ifdef VK_USE_PLATFORM_ANDROID_KHR |
| 6579 | if (output_ahw_usage) { |
| 6580 | output_ahw_usage->androidHardwareBufferUsage = getAndroidHardwareBufferUsageFromVkUsage( |
| 6581 | pImageFormatInfo->flags, pImageFormatInfo->usage); |
| 6582 | } |
| 6583 | #endif |
Andrew Woloszyn | 416d0a1 | 2023-10-04 17:02:19 -0400 | [diff] [blame] | 6584 | if (ext_img_properties) { |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 6585 | transformImpl_VkExternalMemoryProperties_fromhost( |
| 6586 | &ext_img_properties->externalMemoryProperties, 0); |
Andrew Woloszyn | 416d0a1 | 2023-10-04 17:02:19 -0400 | [diff] [blame] | 6587 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6588 | return hostRes; |
| 6589 | } |
| 6590 | |
| 6591 | VkResult ResourceTracker::on_vkGetPhysicalDeviceImageFormatProperties2( |
| 6592 | void* context, VkResult input_result, VkPhysicalDevice physicalDevice, |
| 6593 | const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, |
| 6594 | VkImageFormatProperties2* pImageFormatProperties) { |
| 6595 | return on_vkGetPhysicalDeviceImageFormatProperties2_common( |
| 6596 | false /* not KHR */, context, input_result, physicalDevice, pImageFormatInfo, |
| 6597 | pImageFormatProperties); |
| 6598 | } |
| 6599 | |
| 6600 | VkResult ResourceTracker::on_vkGetPhysicalDeviceImageFormatProperties2KHR( |
| 6601 | void* context, VkResult input_result, VkPhysicalDevice physicalDevice, |
| 6602 | const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, |
| 6603 | VkImageFormatProperties2* pImageFormatProperties) { |
| 6604 | return on_vkGetPhysicalDeviceImageFormatProperties2_common( |
| 6605 | true /* is KHR */, context, input_result, physicalDevice, pImageFormatInfo, |
| 6606 | pImageFormatProperties); |
| 6607 | } |
| 6608 | |
Andrew Woloszyn | 416d0a1 | 2023-10-04 17:02:19 -0400 | [diff] [blame] | 6609 | void ResourceTracker::on_vkGetPhysicalDeviceExternalBufferProperties_common( |
| 6610 | bool isKhr, void* context, VkPhysicalDevice physicalDevice, |
| 6611 | const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, |
| 6612 | VkExternalBufferProperties* pExternalBufferProperties) { |
| 6613 | VkEncoder* enc = (VkEncoder*)context; |
| 6614 | |
Jason Macnak | abe57a8 | 2024-02-02 17:02:21 -0800 | [diff] [blame] | 6615 | #if defined(ANDROID) |
Yahan Zhou | 79ab57a | 2023-09-14 16:24:26 -0700 | [diff] [blame] | 6616 | // Older versions of Goldfish's Gralloc did not support allocating AHARDWAREBUFFER_FORMAT_BLOB |
| 6617 | // with GPU usage (b/299520213). |
| 6618 | if (ResourceTracker::threadingCallbacks.hostConnectionGetFunc() |
| 6619 | ->grallocHelper() |
| 6620 | ->treatBlobAsImage() && |
| 6621 | pExternalBufferInfo->handleType == |
| 6622 | VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) { |
| 6623 | pExternalBufferProperties->externalMemoryProperties.externalMemoryFeatures = 0; |
| 6624 | pExternalBufferProperties->externalMemoryProperties.exportFromImportedHandleTypes = 0; |
| 6625 | pExternalBufferProperties->externalMemoryProperties.compatibleHandleTypes = 0; |
| 6626 | return; |
| 6627 | } |
Jason Macnak | abe57a8 | 2024-02-02 17:02:21 -0800 | [diff] [blame] | 6628 | #endif |
Yahan Zhou | 79ab57a | 2023-09-14 16:24:26 -0700 | [diff] [blame] | 6629 | |
Andrew Woloszyn | 416d0a1 | 2023-10-04 17:02:19 -0400 | [diff] [blame] | 6630 | uint32_t supportedHandleType = 0; |
| 6631 | #ifdef VK_USE_PLATFORM_FUCHSIA |
C Stout | 5a3a422 | 2023-11-14 16:31:56 -0800 | [diff] [blame] | 6632 | supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA; |
Andrew Woloszyn | 416d0a1 | 2023-10-04 17:02:19 -0400 | [diff] [blame] | 6633 | #endif |
| 6634 | #ifdef VK_USE_PLATFORM_ANDROID_KHR |
| 6635 | supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT | |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 6636 | VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID; |
Andrew Woloszyn | 416d0a1 | 2023-10-04 17:02:19 -0400 | [diff] [blame] | 6637 | #endif |
| 6638 | if (supportedHandleType) { |
| 6639 | // 0 is a valid handleType so we can't check against 0 |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 6640 | if (pExternalBufferInfo->handleType != |
| 6641 | (pExternalBufferInfo->handleType & supportedHandleType)) { |
Andrew Woloszyn | 416d0a1 | 2023-10-04 17:02:19 -0400 | [diff] [blame] | 6642 | return; |
| 6643 | } |
| 6644 | } |
| 6645 | |
| 6646 | if (isKhr) { |
| 6647 | enc->vkGetPhysicalDeviceExternalBufferPropertiesKHR( |
| 6648 | physicalDevice, pExternalBufferInfo, pExternalBufferProperties, true /* do lock */); |
| 6649 | } else { |
| 6650 | enc->vkGetPhysicalDeviceExternalBufferProperties( |
| 6651 | physicalDevice, pExternalBufferInfo, pExternalBufferProperties, true /* do lock */); |
| 6652 | } |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 6653 | transformImpl_VkExternalMemoryProperties_fromhost( |
| 6654 | &pExternalBufferProperties->externalMemoryProperties, 0); |
Andrew Woloszyn | 416d0a1 | 2023-10-04 17:02:19 -0400 | [diff] [blame] | 6655 | } |
| 6656 | |
| 6657 | void ResourceTracker::on_vkGetPhysicalDeviceExternalBufferProperties( |
| 6658 | void* context, VkPhysicalDevice physicalDevice, |
| 6659 | const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, |
| 6660 | VkExternalBufferProperties* pExternalBufferProperties) { |
| 6661 | return on_vkGetPhysicalDeviceExternalBufferProperties_common( |
| 6662 | false /* not KHR */, context, physicalDevice, pExternalBufferInfo, |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 6663 | pExternalBufferProperties); |
Andrew Woloszyn | 416d0a1 | 2023-10-04 17:02:19 -0400 | [diff] [blame] | 6664 | } |
| 6665 | |
| 6666 | void ResourceTracker::on_vkGetPhysicalDeviceExternalBufferPropertiesKHR( |
| 6667 | void* context, VkPhysicalDevice physicalDevice, |
| 6668 | const VkPhysicalDeviceExternalBufferInfoKHR* pExternalBufferInfo, |
| 6669 | VkExternalBufferPropertiesKHR* pExternalBufferProperties) { |
| 6670 | return on_vkGetPhysicalDeviceExternalBufferProperties_common( |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 6671 | true /* is KHR */, context, physicalDevice, pExternalBufferInfo, pExternalBufferProperties); |
Andrew Woloszyn | 416d0a1 | 2023-10-04 17:02:19 -0400 | [diff] [blame] | 6672 | } |
| 6673 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6674 | void ResourceTracker::on_vkGetPhysicalDeviceExternalSemaphoreProperties( |
| 6675 | void*, VkPhysicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, |
| 6676 | VkExternalSemaphoreProperties* pExternalSemaphoreProperties) { |
| 6677 | (void)pExternalSemaphoreInfo; |
| 6678 | (void)pExternalSemaphoreProperties; |
| 6679 | #ifdef VK_USE_PLATFORM_FUCHSIA |
| 6680 | if (pExternalSemaphoreInfo->handleType == |
| 6681 | static_cast<uint32_t>(VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA)) { |
| 6682 | pExternalSemaphoreProperties->compatibleHandleTypes |= |
| 6683 | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA; |
| 6684 | pExternalSemaphoreProperties->exportFromImportedHandleTypes |= |
| 6685 | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA; |
| 6686 | pExternalSemaphoreProperties->externalSemaphoreFeatures |= |
| 6687 | VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT | |
| 6688 | VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT; |
| 6689 | } |
| 6690 | #else |
Yahan Zhou | 8a4505f | 2023-10-11 11:05:04 -0700 | [diff] [blame] | 6691 | const VkSemaphoreTypeCreateInfo* semaphoreTypeCi = |
| 6692 | vk_find_struct<VkSemaphoreTypeCreateInfo>(pExternalSemaphoreInfo); |
| 6693 | bool isSemaphoreTimeline = |
| 6694 | semaphoreTypeCi != nullptr && semaphoreTypeCi->semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE; |
| 6695 | if (isSemaphoreTimeline) { |
| 6696 | // b/304373623 |
| 6697 | // dEQP-VK.api.external.semaphore.sync_fd#info_timeline |
| 6698 | pExternalSemaphoreProperties->compatibleHandleTypes = 0; |
| 6699 | pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0; |
| 6700 | pExternalSemaphoreProperties->externalSemaphoreFeatures = 0; |
| 6701 | } else if (pExternalSemaphoreInfo->handleType == |
| 6702 | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) { |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6703 | pExternalSemaphoreProperties->compatibleHandleTypes |= |
| 6704 | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT; |
| 6705 | pExternalSemaphoreProperties->exportFromImportedHandleTypes |= |
| 6706 | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT; |
| 6707 | pExternalSemaphoreProperties->externalSemaphoreFeatures |= |
| 6708 | VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT | |
| 6709 | VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT; |
| 6710 | } |
| 6711 | #endif // VK_USE_PLATFORM_FUCHSIA |
| 6712 | } |
| 6713 | |
| 6714 | void ResourceTracker::on_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR( |
| 6715 | void* context, VkPhysicalDevice physicalDevice, |
| 6716 | const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, |
| 6717 | VkExternalSemaphoreProperties* pExternalSemaphoreProperties) { |
| 6718 | on_vkGetPhysicalDeviceExternalSemaphoreProperties( |
| 6719 | context, physicalDevice, pExternalSemaphoreInfo, pExternalSemaphoreProperties); |
| 6720 | } |
| 6721 | |
| 6722 | void ResourceTracker::registerEncoderCleanupCallback(const VkEncoder* encoder, void* object, |
| 6723 | CleanupCallback callback) { |
| 6724 | AutoLock<RecursiveLock> lock(mLock); |
| 6725 | auto& callbacks = mEncoderCleanupCallbacks[encoder]; |
| 6726 | callbacks[object] = callback; |
| 6727 | } |
| 6728 | |
| 6729 | void ResourceTracker::unregisterEncoderCleanupCallback(const VkEncoder* encoder, void* object) { |
| 6730 | AutoLock<RecursiveLock> lock(mLock); |
| 6731 | mEncoderCleanupCallbacks[encoder].erase(object); |
| 6732 | } |
| 6733 | |
| 6734 | void ResourceTracker::onEncoderDeleted(const VkEncoder* encoder) { |
| 6735 | AutoLock<RecursiveLock> lock(mLock); |
| 6736 | if (mEncoderCleanupCallbacks.find(encoder) == mEncoderCleanupCallbacks.end()) return; |
| 6737 | |
| 6738 | std::unordered_map<void*, CleanupCallback> callbackCopies = mEncoderCleanupCallbacks[encoder]; |
| 6739 | |
| 6740 | mEncoderCleanupCallbacks.erase(encoder); |
| 6741 | lock.unlock(); |
| 6742 | |
| 6743 | for (auto it : callbackCopies) { |
| 6744 | it.second(); |
| 6745 | } |
| 6746 | } |
| 6747 | |
| 6748 | CommandBufferStagingStream::Alloc ResourceTracker::getAlloc() { |
| 6749 | if (mFeatureInfo->hasVulkanAuxCommandMemory) { |
| 6750 | return [this](size_t size) -> CommandBufferStagingStream::Memory { |
| 6751 | VkMemoryAllocateInfo info{ |
| 6752 | .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, |
| 6753 | .pNext = nullptr, |
| 6754 | .allocationSize = size, |
| 6755 | .memoryTypeIndex = VK_MAX_MEMORY_TYPES // indicates auxiliary memory |
| 6756 | }; |
| 6757 | |
| 6758 | auto enc = ResourceTracker::getThreadLocalEncoder(); |
| 6759 | VkDevice device = VK_NULL_HANDLE; |
| 6760 | VkDeviceMemory vkDeviceMem = VK_NULL_HANDLE; |
| 6761 | VkResult result = getCoherentMemory(&info, enc, device, &vkDeviceMem); |
| 6762 | if (result != VK_SUCCESS) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 6763 | mesa_loge("Failed to get coherent memory %u", result); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6764 | return {.deviceMemory = VK_NULL_HANDLE, .ptr = nullptr}; |
| 6765 | } |
| 6766 | |
| 6767 | // getCoherentMemory() uses suballocations. |
| 6768 | // To retrieve the suballocated memory address, look up |
| 6769 | // VkDeviceMemory filled in by getCoherentMemory() |
| 6770 | // scope of mLock |
| 6771 | { |
| 6772 | AutoLock<RecursiveLock> lock(mLock); |
| 6773 | const auto it = info_VkDeviceMemory.find(vkDeviceMem); |
| 6774 | if (it == info_VkDeviceMemory.end()) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 6775 | mesa_loge("Coherent memory allocated %u not found", result); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6776 | return {.deviceMemory = VK_NULL_HANDLE, .ptr = nullptr}; |
Shalini S | db704c9 | 2023-01-27 21:35:33 +0000 | [diff] [blame] | 6777 | }; |
| 6778 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6779 | const auto& info = it->second; |
| 6780 | return {.deviceMemory = vkDeviceMem, .ptr = info.ptr}; |
| 6781 | } |
| 6782 | }; |
| 6783 | } |
| 6784 | return nullptr; |
| 6785 | } |
Shalini S | db704c9 | 2023-01-27 21:35:33 +0000 | [diff] [blame] | 6786 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6787 | CommandBufferStagingStream::Free ResourceTracker::getFree() { |
| 6788 | if (mFeatureInfo->hasVulkanAuxCommandMemory) { |
| 6789 | return [this](const CommandBufferStagingStream::Memory& memory) { |
| 6790 | // deviceMemory may not be the actual backing auxiliary VkDeviceMemory |
| 6791 | // for suballocations, deviceMemory is a alias VkDeviceMemory hand; |
| 6792 | // freeCoherentMemoryLocked maps the alias to the backing VkDeviceMemory |
| 6793 | VkDeviceMemory deviceMemory = memory.deviceMemory; |
| 6794 | AutoLock<RecursiveLock> lock(mLock); |
| 6795 | auto it = info_VkDeviceMemory.find(deviceMemory); |
| 6796 | if (it == info_VkDeviceMemory.end()) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 6797 | mesa_loge("Device memory to free not found"); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6798 | return; |
| 6799 | } |
| 6800 | auto coherentMemory = freeCoherentMemoryLocked(deviceMemory, it->second); |
| 6801 | // We have to release the lock before we could possibly free a |
| 6802 | // CoherentMemory, because that will call into VkEncoder, which |
| 6803 | // shouldn't be called when the lock is held. |
| 6804 | lock.unlock(); |
| 6805 | coherentMemory = nullptr; |
| 6806 | }; |
| 6807 | } |
| 6808 | return nullptr; |
| 6809 | } |
Shalini S | db704c9 | 2023-01-27 21:35:33 +0000 | [diff] [blame] | 6810 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6811 | VkResult ResourceTracker::on_vkBeginCommandBuffer(void* context, VkResult input_result, |
| 6812 | VkCommandBuffer commandBuffer, |
| 6813 | const VkCommandBufferBeginInfo* pBeginInfo) { |
| 6814 | (void)context; |
| 6815 | |
| 6816 | resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */, |
| 6817 | true /* also clear pending descriptor sets */); |
| 6818 | |
| 6819 | VkEncoder* enc = ResourceTracker::getCommandBufferEncoder(commandBuffer); |
| 6820 | (void)input_result; |
| 6821 | |
| 6822 | struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer); |
| 6823 | cb->flags = pBeginInfo->flags; |
| 6824 | |
| 6825 | VkCommandBufferBeginInfo modifiedBeginInfo; |
| 6826 | |
| 6827 | if (pBeginInfo->pInheritanceInfo && !cb->isSecondary) { |
| 6828 | modifiedBeginInfo = *pBeginInfo; |
| 6829 | modifiedBeginInfo.pInheritanceInfo = nullptr; |
| 6830 | pBeginInfo = &modifiedBeginInfo; |
Shalini S | db704c9 | 2023-01-27 21:35:33 +0000 | [diff] [blame] | 6831 | } |
| 6832 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6833 | if (!supportsDeferredCommands()) { |
| 6834 | return enc->vkBeginCommandBuffer(commandBuffer, pBeginInfo, true /* do lock */); |
Shalini S | db704c9 | 2023-01-27 21:35:33 +0000 | [diff] [blame] | 6835 | } |
| 6836 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6837 | enc->vkBeginCommandBufferAsyncGOOGLE(commandBuffer, pBeginInfo, true /* do lock */); |
Lingfeng Yang | c53e747 | 2019-03-27 08:50:55 -0700 | [diff] [blame] | 6838 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6839 | return VK_SUCCESS; |
| 6840 | } |
Lingfeng Yang | 967f9af | 2021-01-22 17:56:24 -0800 | [diff] [blame] | 6841 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6842 | VkResult ResourceTracker::on_vkEndCommandBuffer(void* context, VkResult input_result, |
| 6843 | VkCommandBuffer commandBuffer) { |
| 6844 | VkEncoder* enc = (VkEncoder*)context; |
| 6845 | (void)input_result; |
Lingfeng Yang | 967f9af | 2021-01-22 17:56:24 -0800 | [diff] [blame] | 6846 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6847 | if (!supportsDeferredCommands()) { |
| 6848 | return enc->vkEndCommandBuffer(commandBuffer, true /* do lock */); |
Lingfeng Yang | c53e747 | 2019-03-27 08:50:55 -0700 | [diff] [blame] | 6849 | } |
| 6850 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6851 | enc->vkEndCommandBufferAsyncGOOGLE(commandBuffer, true /* do lock */); |
Lingfeng Yang | c53e747 | 2019-03-27 08:50:55 -0700 | [diff] [blame] | 6852 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6853 | return VK_SUCCESS; |
| 6854 | } |
Lingfeng Yang | c53e747 | 2019-03-27 08:50:55 -0700 | [diff] [blame] | 6855 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6856 | VkResult ResourceTracker::on_vkResetCommandBuffer(void* context, VkResult input_result, |
| 6857 | VkCommandBuffer commandBuffer, |
| 6858 | VkCommandBufferResetFlags flags) { |
Jean-Francois Thibert | ed8433a | 2024-05-07 15:14:55 -0400 | [diff] [blame] | 6859 | VkEncoder* enc = (VkEncoder*)context; |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6860 | (void)input_result; |
Lingfeng Yang | 39a276e | 2019-06-17 13:27:22 -0700 | [diff] [blame] | 6861 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6862 | if (!supportsDeferredCommands()) { |
Jean-Francois Thibert | ed8433a | 2024-05-07 15:14:55 -0400 | [diff] [blame] | 6863 | VkResult res = enc->vkResetCommandBuffer(commandBuffer, flags, true /* do lock */); |
| 6864 | resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */, |
| 6865 | true /* also clear pending descriptor sets */); |
| 6866 | return res; |
Lingfeng Yang | c53e747 | 2019-03-27 08:50:55 -0700 | [diff] [blame] | 6867 | } |
| 6868 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6869 | enc->vkResetCommandBufferAsyncGOOGLE(commandBuffer, flags, true /* do lock */); |
Jean-Francois Thibert | ed8433a | 2024-05-07 15:14:55 -0400 | [diff] [blame] | 6870 | resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */, |
| 6871 | true /* also clear pending descriptor sets */); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6872 | return VK_SUCCESS; |
| 6873 | } |
Lingfeng Yang | c53e747 | 2019-03-27 08:50:55 -0700 | [diff] [blame] | 6874 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6875 | VkResult ResourceTracker::on_vkCreateImageView(void* context, VkResult input_result, |
| 6876 | VkDevice device, |
| 6877 | const VkImageViewCreateInfo* pCreateInfo, |
| 6878 | const VkAllocationCallbacks* pAllocator, |
| 6879 | VkImageView* pView) { |
| 6880 | VkEncoder* enc = (VkEncoder*)context; |
| 6881 | (void)input_result; |
Lingfeng Yang | 967f9af | 2021-01-22 17:56:24 -0800 | [diff] [blame] | 6882 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6883 | VkImageViewCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo); |
| 6884 | vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo); |
Roman Kiryanov | 6db11e5 | 2019-04-26 14:18:14 -0700 | [diff] [blame] | 6885 | |
Gurchetan Singh | 24e2da1 | 2022-06-02 16:52:49 -0700 | [diff] [blame] | 6886 | #if defined(VK_USE_PLATFORM_ANDROID_KHR) |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6887 | if (pCreateInfo->format == VK_FORMAT_UNDEFINED) { |
| 6888 | AutoLock<RecursiveLock> lock(mLock); |
Kaiyi Li | a713b41 | 2021-09-20 07:03:01 -0700 | [diff] [blame] | 6889 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6890 | auto it = info_VkImage.find(pCreateInfo->image); |
| 6891 | if (it != info_VkImage.end() && it->second.hasExternalFormat) { |
Sergiu | ad91847 | 2024-05-21 16:28:45 +0100 | [diff] [blame] | 6892 | localCreateInfo.format = vk_format_from_fourcc(it->second.externalFourccFormat); |
Kaiyi Li | a713b41 | 2021-09-20 07:03:01 -0700 | [diff] [blame] | 6893 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6894 | } |
| 6895 | VkSamplerYcbcrConversionInfo localVkSamplerYcbcrConversionInfo; |
| 6896 | const VkSamplerYcbcrConversionInfo* samplerYcbcrConversionInfo = |
| 6897 | vk_find_struct<VkSamplerYcbcrConversionInfo>(pCreateInfo); |
| 6898 | if (samplerYcbcrConversionInfo) { |
| 6899 | if (samplerYcbcrConversionInfo->conversion != VK_YCBCR_CONVERSION_DO_NOTHING) { |
| 6900 | localVkSamplerYcbcrConversionInfo = vk_make_orphan_copy(*samplerYcbcrConversionInfo); |
| 6901 | vk_append_struct(&structChainIter, &localVkSamplerYcbcrConversionInfo); |
Roman Kiryanov | 6db11e5 | 2019-04-26 14:18:14 -0700 | [diff] [blame] | 6902 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6903 | } |
Roman Kiryanov | 6db11e5 | 2019-04-26 14:18:14 -0700 | [diff] [blame] | 6904 | #endif |
| 6905 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6906 | return enc->vkCreateImageView(device, &localCreateInfo, pAllocator, pView, true /* do lock */); |
| 6907 | } |
| 6908 | |
| 6909 | void ResourceTracker::on_vkCmdExecuteCommands(void* context, VkCommandBuffer commandBuffer, |
| 6910 | uint32_t commandBufferCount, |
| 6911 | const VkCommandBuffer* pCommandBuffers) { |
| 6912 | VkEncoder* enc = (VkEncoder*)context; |
| 6913 | |
| 6914 | if (!mFeatureInfo->hasVulkanQueueSubmitWithCommands) { |
| 6915 | enc->vkCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers, |
| 6916 | true /* do lock */); |
| 6917 | return; |
Roman Kiryanov | 6db11e5 | 2019-04-26 14:18:14 -0700 | [diff] [blame] | 6918 | } |
| 6919 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6920 | struct goldfish_VkCommandBuffer* primary = as_goldfish_VkCommandBuffer(commandBuffer); |
| 6921 | for (uint32_t i = 0; i < commandBufferCount; ++i) { |
| 6922 | struct goldfish_VkCommandBuffer* secondary = |
| 6923 | as_goldfish_VkCommandBuffer(pCommandBuffers[i]); |
| 6924 | appendObject(&secondary->superObjects, primary); |
| 6925 | appendObject(&primary->subObjects, secondary); |
Lingfeng Yang | 967f9af | 2021-01-22 17:56:24 -0800 | [diff] [blame] | 6926 | } |
| 6927 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6928 | enc->vkCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers, |
| 6929 | true /* do lock */); |
| 6930 | } |
Lingfeng Yang | f8cdd8b | 2021-02-08 11:32:45 -0800 | [diff] [blame] | 6931 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6932 | void ResourceTracker::on_vkCmdBindDescriptorSets(void* context, VkCommandBuffer commandBuffer, |
| 6933 | VkPipelineBindPoint pipelineBindPoint, |
| 6934 | VkPipelineLayout layout, uint32_t firstSet, |
| 6935 | uint32_t descriptorSetCount, |
| 6936 | const VkDescriptorSet* pDescriptorSets, |
| 6937 | uint32_t dynamicOffsetCount, |
| 6938 | const uint32_t* pDynamicOffsets) { |
| 6939 | VkEncoder* enc = (VkEncoder*)context; |
Lingfeng Yang | f8cdd8b | 2021-02-08 11:32:45 -0800 | [diff] [blame] | 6940 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6941 | if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) |
| 6942 | addPendingDescriptorSets(commandBuffer, descriptorSetCount, pDescriptorSets); |
Lingfeng Yang | f8cdd8b | 2021-02-08 11:32:45 -0800 | [diff] [blame] | 6943 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6944 | enc->vkCmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, |
| 6945 | descriptorSetCount, pDescriptorSets, dynamicOffsetCount, |
| 6946 | pDynamicOffsets, true /* do lock */); |
| 6947 | } |
Lingfeng Yang | f8cdd8b | 2021-02-08 11:32:45 -0800 | [diff] [blame] | 6948 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6949 | void ResourceTracker::on_vkCmdPipelineBarrier( |
| 6950 | void* context, VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, |
| 6951 | VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, |
| 6952 | uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, |
| 6953 | uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, |
| 6954 | uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers) { |
| 6955 | VkEncoder* enc = (VkEncoder*)context; |
Lingfeng Yang | 55676e0 | 2021-02-08 08:39:45 -0800 | [diff] [blame] | 6956 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6957 | std::vector<VkImageMemoryBarrier> updatedImageMemoryBarriers; |
| 6958 | updatedImageMemoryBarriers.reserve(imageMemoryBarrierCount); |
| 6959 | for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) { |
| 6960 | VkImageMemoryBarrier barrier = pImageMemoryBarriers[i]; |
Jason Macnak | e6704de | 2022-05-27 09:41:20 -0700 | [diff] [blame] | 6961 | |
| 6962 | #ifdef VK_USE_PLATFORM_ANDROID_KHR |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6963 | // Unfortunetly, Android does not yet have a mechanism for sharing the expected |
| 6964 | // VkImageLayout when passing around AHardwareBuffer-s so many existing users |
| 6965 | // that import AHardwareBuffer-s into VkImage-s/VkDeviceMemory-s simply use |
| 6966 | // VK_IMAGE_LAYOUT_UNDEFINED. However, the Vulkan spec's image layout transition |
| 6967 | // sections says "If the old layout is VK_IMAGE_LAYOUT_UNDEFINED, the contents of |
| 6968 | // that range may be discarded." Some Vulkan drivers have been observed to actually |
| 6969 | // perform the discard which leads to AHardwareBuffer-s being unintentionally |
| 6970 | // cleared. See go/ahb-vkimagelayout for more information. |
| 6971 | if (barrier.srcQueueFamilyIndex != barrier.dstQueueFamilyIndex && |
| 6972 | (barrier.srcQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL || |
| 6973 | barrier.srcQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT) && |
| 6974 | barrier.oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) { |
| 6975 | // This is not a complete solution as the Vulkan spec does not require that |
| 6976 | // Vulkan drivers perform a no-op in the case when oldLayout equals newLayout |
| 6977 | // but this has been observed to be enough to work for now to avoid clearing |
| 6978 | // out images. |
| 6979 | // TODO(b/236179843): figure out long term solution. |
| 6980 | barrier.oldLayout = barrier.newLayout; |
| 6981 | } |
Jason Macnak | e6704de | 2022-05-27 09:41:20 -0700 | [diff] [blame] | 6982 | #endif |
| 6983 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6984 | updatedImageMemoryBarriers.push_back(barrier); |
Jason Macnak | e6704de | 2022-05-27 09:41:20 -0700 | [diff] [blame] | 6985 | } |
| 6986 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6987 | enc->vkCmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, |
| 6988 | memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, |
| 6989 | pBufferMemoryBarriers, updatedImageMemoryBarriers.size(), |
| 6990 | updatedImageMemoryBarriers.data(), true /* do lock */); |
| 6991 | } |
Lingfeng Yang | a4ae052 | 2021-02-17 14:12:19 -0800 | [diff] [blame] | 6992 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6993 | void ResourceTracker::on_vkDestroyDescriptorSetLayout(void* context, VkDevice device, |
| 6994 | VkDescriptorSetLayout descriptorSetLayout, |
| 6995 | const VkAllocationCallbacks* pAllocator) { |
| 6996 | decDescriptorSetLayoutRef(context, device, descriptorSetLayout, pAllocator); |
| 6997 | } |
Lingfeng Yang | a4ae052 | 2021-02-17 14:12:19 -0800 | [diff] [blame] | 6998 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 6999 | VkResult ResourceTracker::on_vkAllocateCommandBuffers( |
| 7000 | void* context, VkResult input_result, VkDevice device, |
| 7001 | const VkCommandBufferAllocateInfo* pAllocateInfo, VkCommandBuffer* pCommandBuffers) { |
| 7002 | (void)input_result; |
Lingfeng Yang | a4ae052 | 2021-02-17 14:12:19 -0800 | [diff] [blame] | 7003 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7004 | VkEncoder* enc = (VkEncoder*)context; |
| 7005 | VkResult res = |
| 7006 | enc->vkAllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers, true /* do lock */); |
| 7007 | if (VK_SUCCESS != res) return res; |
| 7008 | |
| 7009 | for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; ++i) { |
| 7010 | struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(pCommandBuffers[i]); |
| 7011 | cb->isSecondary = pAllocateInfo->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY; |
| 7012 | cb->device = device; |
Lingfeng Yang | a4ae052 | 2021-02-17 14:12:19 -0800 | [diff] [blame] | 7013 | } |
| 7014 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7015 | return res; |
| 7016 | } |
Lingfeng Yang | 97f51af | 2021-04-22 12:24:11 -0700 | [diff] [blame] | 7017 | |
Gurchetan Singh | 72a6758 | 2022-09-09 13:52:16 -0700 | [diff] [blame] | 7018 | #if defined(VK_USE_PLATFORM_ANDROID_KHR) |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7019 | VkResult ResourceTracker::exportSyncFdForQSRILocked(VkImage image, int* fd) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 7020 | mesa_logi("%s: call for image %p hos timage handle 0x%llx\n", __func__, (void*)image, |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7021 | (unsigned long long)get_host_u64_VkImage(image)); |
Gurchetan Singh | 72a6758 | 2022-09-09 13:52:16 -0700 | [diff] [blame] | 7022 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7023 | if (mFeatureInfo->hasVirtioGpuNativeSync) { |
| 7024 | struct VirtGpuExecBuffer exec = {}; |
| 7025 | struct gfxstreamCreateQSRIExportVK exportQSRI = {}; |
| 7026 | VirtGpuDevice* instance = VirtGpuDevice::getInstance(); |
Gurchetan Singh | 72a6758 | 2022-09-09 13:52:16 -0700 | [diff] [blame] | 7027 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7028 | uint64_t hostImageHandle = get_host_u64_VkImage(image); |
Gurchetan Singh | 72a6758 | 2022-09-09 13:52:16 -0700 | [diff] [blame] | 7029 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7030 | exportQSRI.hdr.opCode = GFXSTREAM_CREATE_QSRI_EXPORT_VK; |
| 7031 | exportQSRI.imageHandleLo = (uint32_t)hostImageHandle; |
| 7032 | exportQSRI.imageHandleHi = (uint32_t)(hostImageHandle >> 32); |
Lingfeng Yang | 7efc857 | 2021-07-13 16:30:10 -0700 | [diff] [blame] | 7033 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7034 | exec.command = static_cast<void*>(&exportQSRI); |
| 7035 | exec.command_size = sizeof(exportQSRI); |
| 7036 | exec.flags = kFenceOut | kRingIdx; |
| 7037 | if (instance->execBuffer(exec, nullptr)) return VK_ERROR_OUT_OF_HOST_MEMORY; |
Lingfeng Yang | 7efc857 | 2021-07-13 16:30:10 -0700 | [diff] [blame] | 7038 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7039 | *fd = exec.handle.osHandle; |
| 7040 | } else { |
Gurchetan Singh | b7feebd | 2024-01-23 14:12:36 -0800 | [diff] [blame] | 7041 | #if GFXSTREAM_ENABLE_GUEST_GOLDFISH |
Yahan Zhou | 8d5457f | 2023-10-04 15:48:27 -0700 | [diff] [blame] | 7042 | ensureSyncDeviceFd(); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7043 | goldfish_sync_queue_work( |
| 7044 | mSyncDeviceFd, get_host_u64_VkImage(image) /* the handle */, |
| 7045 | GOLDFISH_SYNC_VULKAN_QSRI /* thread handle (doubling as type field) */, fd); |
Gurchetan Singh | b7feebd | 2024-01-23 14:12:36 -0800 | [diff] [blame] | 7046 | #endif |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7047 | } |
Lingfeng Yang | 7efc857 | 2021-07-13 16:30:10 -0700 | [diff] [blame] | 7048 | |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 7049 | mesa_logi("%s: got fd: %d\n", __func__, *fd); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7050 | auto imageInfoIt = info_VkImage.find(image); |
| 7051 | if (imageInfoIt != info_VkImage.end()) { |
| 7052 | auto& imageInfo = imageInfoIt->second; |
| 7053 | |
| 7054 | auto* syncHelper = |
| 7055 | ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper(); |
| 7056 | |
| 7057 | // Remove any pending QSRI sync fds that are already signaled. |
| 7058 | auto syncFdIt = imageInfo.pendingQsriSyncFds.begin(); |
| 7059 | while (syncFdIt != imageInfo.pendingQsriSyncFds.end()) { |
| 7060 | int syncFd = *syncFdIt; |
| 7061 | int syncWaitRet = syncHelper->wait(syncFd, /*timeout msecs*/ 0); |
| 7062 | if (syncWaitRet == 0) { |
| 7063 | // Sync fd is signaled. |
| 7064 | syncFdIt = imageInfo.pendingQsriSyncFds.erase(syncFdIt); |
| 7065 | syncHelper->close(syncFd); |
| 7066 | } else { |
| 7067 | if (errno != ETIME) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 7068 | mesa_loge("%s: Failed to wait for pending QSRI sync: sterror: %s errno: %d", |
| 7069 | __func__, strerror(errno), errno); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7070 | } |
| 7071 | break; |
| 7072 | } |
Lingfeng Yang | 7efc857 | 2021-07-13 16:30:10 -0700 | [diff] [blame] | 7073 | } |
Jason Macnak | 119ec5d | 2022-06-23 16:18:33 -0700 | [diff] [blame] | 7074 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7075 | int syncFdDup = syncHelper->dup(*fd); |
| 7076 | if (syncFdDup < 0) { |
Gurchetan Singh | 42361f7 | 2024-05-16 17:37:11 -0700 | [diff] [blame] | 7077 | mesa_loge("%s: Failed to dup() QSRI sync fd : sterror: %s errno: %d", __func__, |
| 7078 | strerror(errno), errno); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7079 | } else { |
| 7080 | imageInfo.pendingQsriSyncFds.push_back(syncFdDup); |
| 7081 | } |
| 7082 | } |
Jason Macnak | 119ec5d | 2022-06-23 16:18:33 -0700 | [diff] [blame] | 7083 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7084 | return VK_SUCCESS; |
| 7085 | } |
Jason Macnak | a194bbf | 2023-07-20 10:21:13 -0700 | [diff] [blame] | 7086 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7087 | VkResult ResourceTracker::on_vkQueueSignalReleaseImageANDROID(void* context, VkResult input_result, |
| 7088 | VkQueue queue, |
| 7089 | uint32_t waitSemaphoreCount, |
| 7090 | const VkSemaphore* pWaitSemaphores, |
| 7091 | VkImage image, int* pNativeFenceFd) { |
| 7092 | (void)input_result; |
| 7093 | |
| 7094 | VkEncoder* enc = (VkEncoder*)context; |
| 7095 | |
| 7096 | if (!mFeatureInfo->hasVulkanAsyncQsri) { |
| 7097 | return enc->vkQueueSignalReleaseImageANDROID(queue, waitSemaphoreCount, pWaitSemaphores, |
| 7098 | image, pNativeFenceFd, true /* lock */); |
| 7099 | } |
| 7100 | |
| 7101 | { |
| 7102 | AutoLock<RecursiveLock> lock(mLock); |
| 7103 | auto it = info_VkImage.find(image); |
| 7104 | if (it == info_VkImage.end()) { |
| 7105 | if (pNativeFenceFd) *pNativeFenceFd = -1; |
| 7106 | return VK_ERROR_INITIALIZATION_FAILED; |
| 7107 | } |
| 7108 | } |
| 7109 | |
| 7110 | enc->vkQueueSignalReleaseImageANDROIDAsyncGOOGLE(queue, waitSemaphoreCount, pWaitSemaphores, |
| 7111 | image, true /* lock */); |
| 7112 | |
| 7113 | AutoLock<RecursiveLock> lock(mLock); |
| 7114 | VkResult result; |
| 7115 | if (pNativeFenceFd) { |
| 7116 | result = exportSyncFdForQSRILocked(image, pNativeFenceFd); |
| 7117 | } else { |
| 7118 | int syncFd; |
| 7119 | result = exportSyncFdForQSRILocked(image, &syncFd); |
| 7120 | |
| 7121 | if (syncFd >= 0) { |
| 7122 | auto* syncHelper = |
| 7123 | ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper(); |
| 7124 | syncHelper->close(syncFd); |
| 7125 | } |
| 7126 | } |
| 7127 | |
| 7128 | return result; |
| 7129 | } |
| 7130 | #endif |
| 7131 | |
// vkCreateGraphicsPipelines: sanitizes each create info before forwarding.
// Per the Vulkan valid-usage rules, several state pointers are "ignored"
// in certain configurations but some host drivers still dereference them
// (exercised by dEQP's invalid-pointers-unused-structs tests), so the
// ignored pointers are nulled out in a local copy. The caller's create
// infos are never modified.
VkResult ResourceTracker::on_vkCreateGraphicsPipelines(
    void* context, VkResult input_result, VkDevice device, VkPipelineCache pipelineCache,
    uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos,
    const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines) {
    (void)input_result;
    VkEncoder* enc = (VkEncoder*)context;
    // Shallow copies: only the top-level pointers below are rewritten.
    std::vector<VkGraphicsPipelineCreateInfo> localCreateInfos(pCreateInfos,
                                                               pCreateInfos + createInfoCount);
    for (VkGraphicsPipelineCreateInfo& graphicsPipelineCreateInfo : localCreateInfos) {
        // dEQP-VK.api.pipeline.pipeline_invalid_pointers_unused_structs#graphics
        bool requireViewportState = false;
        // Viewport state is required when rasterization is enabled...
        // VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00750
        requireViewportState |=
            graphicsPipelineCreateInfo.pRasterizationState != nullptr &&
            graphicsPipelineCreateInfo.pRasterizationState->rasterizerDiscardEnable == VK_FALSE;
        // ...or when rasterizer-discard is a dynamic state (it may be enabled later).
        // VUID-VkGraphicsPipelineCreateInfo-pViewportState-04892
#ifdef VK_EXT_extended_dynamic_state2
        if (!requireViewportState && graphicsPipelineCreateInfo.pDynamicState) {
            for (uint32_t i = 0; i < graphicsPipelineCreateInfo.pDynamicState->dynamicStateCount;
                 i++) {
                if (VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT ==
                    graphicsPipelineCreateInfo.pDynamicState->pDynamicStates[i]) {
                    requireViewportState = true;
                    break;
                }
            }
        }
#endif // VK_EXT_extended_dynamic_state2
        if (!requireViewportState) {
            graphicsPipelineCreateInfo.pViewportState = nullptr;
        }

        // It has the same requirement as for pViewportState.
        bool shouldIncludeFragmentShaderState = requireViewportState;

        // VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00751
        if (!shouldIncludeFragmentShaderState) {
            graphicsPipelineCreateInfo.pMultisampleState = nullptr;
        }

        // Dynamic rendering: attachment formats in VkPipelineRenderingCreateInfo
        // can make depth/stencil and color-blend state required even with a
        // VK_NULL_HANDLE render pass.
        bool forceDepthStencilState = false;
        bool forceColorBlendState = false;

        const VkPipelineRenderingCreateInfo* pipelineRenderingInfo =
            vk_find_struct<VkPipelineRenderingCreateInfo>(&graphicsPipelineCreateInfo);

        if (pipelineRenderingInfo) {
            forceDepthStencilState |=
                pipelineRenderingInfo->depthAttachmentFormat != VK_FORMAT_UNDEFINED;
            forceDepthStencilState |=
                pipelineRenderingInfo->stencilAttachmentFormat != VK_FORMAT_UNDEFINED;
            forceColorBlendState |= pipelineRenderingInfo->colorAttachmentCount != 0;
        }

        // VUID-VkGraphicsPipelineCreateInfo-renderPass-06043
        // VUID-VkGraphicsPipelineCreateInfo-renderPass-06044
        if (graphicsPipelineCreateInfo.renderPass == VK_NULL_HANDLE ||
            !shouldIncludeFragmentShaderState) {
            // VUID-VkGraphicsPipelineCreateInfo-renderPass-06053
            if (!forceDepthStencilState) {
                graphicsPipelineCreateInfo.pDepthStencilState = nullptr;
            }
            if (!forceColorBlendState) {
                graphicsPipelineCreateInfo.pColorBlendState = nullptr;
            }
        }
    }
    return enc->vkCreateGraphicsPipelines(device, pipelineCache, localCreateInfos.size(),
                                          localCreateInfos.data(), pAllocator, pPipelines,
                                          true /* do lock */);
}
| 7203 | |
| 7204 | uint32_t ResourceTracker::getApiVersionFromInstance(VkInstance instance) const { |
| 7205 | AutoLock<RecursiveLock> lock(mLock); |
| 7206 | uint32_t api = kDefaultApiVersion; |
| 7207 | |
| 7208 | auto it = info_VkInstance.find(instance); |
| 7209 | if (it == info_VkInstance.end()) return api; |
| 7210 | |
| 7211 | api = it->second.highestApiVersion; |
| 7212 | |
| 7213 | return api; |
| 7214 | } |
| 7215 | |
| 7216 | uint32_t ResourceTracker::getApiVersionFromDevice(VkDevice device) const { |
| 7217 | AutoLock<RecursiveLock> lock(mLock); |
| 7218 | |
| 7219 | uint32_t api = kDefaultApiVersion; |
| 7220 | |
| 7221 | auto it = info_VkDevice.find(device); |
| 7222 | if (it == info_VkDevice.end()) return api; |
| 7223 | |
| 7224 | api = it->second.apiVersion; |
| 7225 | |
| 7226 | return api; |
| 7227 | } |
| 7228 | |
| 7229 | bool ResourceTracker::hasInstanceExtension(VkInstance instance, const std::string& name) const { |
| 7230 | AutoLock<RecursiveLock> lock(mLock); |
| 7231 | |
| 7232 | auto it = info_VkInstance.find(instance); |
| 7233 | if (it == info_VkInstance.end()) return false; |
| 7234 | |
| 7235 | return it->second.enabledExtensions.find(name) != it->second.enabledExtensions.end(); |
| 7236 | } |
| 7237 | |
| 7238 | bool ResourceTracker::hasDeviceExtension(VkDevice device, const std::string& name) const { |
| 7239 | AutoLock<RecursiveLock> lock(mLock); |
| 7240 | |
| 7241 | auto it = info_VkDevice.find(device); |
| 7242 | if (it == info_VkDevice.end()) return false; |
| 7243 | |
| 7244 | return it->second.enabledExtensions.find(name) != it->second.enabledExtensions.end(); |
| 7245 | } |
| 7246 | |
| 7247 | VkDevice ResourceTracker::getDevice(VkCommandBuffer commandBuffer) const { |
| 7248 | struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer); |
| 7249 | if (!cb) { |
| 7250 | return nullptr; |
| 7251 | } |
| 7252 | return cb->device; |
| 7253 | } |
| 7254 | |
| 7255 | // Resets staging stream for this command buffer and primary command buffers |
| 7256 | // where this command buffer has been recorded. If requested, also clears the pending |
| 7257 | // descriptor sets. |
| 7258 | void ResourceTracker::resetCommandBufferStagingInfo(VkCommandBuffer commandBuffer, |
| 7259 | bool alsoResetPrimaries, |
| 7260 | bool alsoClearPendingDescriptorSets) { |
| 7261 | struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer); |
| 7262 | if (!cb) { |
| 7263 | return; |
| 7264 | } |
| 7265 | if (cb->privateEncoder) { |
| 7266 | sStaging.pushStaging((CommandBufferStagingStream*)cb->privateStream, cb->privateEncoder); |
| 7267 | cb->privateEncoder = nullptr; |
| 7268 | cb->privateStream = nullptr; |
Lingfeng Yang | 7efc857 | 2021-07-13 16:30:10 -0700 | [diff] [blame] | 7269 | } |
| 7270 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7271 | if (alsoClearPendingDescriptorSets && cb->userPtr) { |
| 7272 | CommandBufferPendingDescriptorSets* pendingSets = |
| 7273 | (CommandBufferPendingDescriptorSets*)cb->userPtr; |
| 7274 | pendingSets->sets.clear(); |
| 7275 | } |
Lingfeng Yang | 7efc857 | 2021-07-13 16:30:10 -0700 | [diff] [blame] | 7276 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7277 | if (alsoResetPrimaries) { |
| 7278 | forAllObjects(cb->superObjects, [this, alsoResetPrimaries, |
| 7279 | alsoClearPendingDescriptorSets](void* obj) { |
| 7280 | VkCommandBuffer superCommandBuffer = (VkCommandBuffer)obj; |
| 7281 | struct goldfish_VkCommandBuffer* superCb = |
| 7282 | as_goldfish_VkCommandBuffer(superCommandBuffer); |
| 7283 | this->resetCommandBufferStagingInfo(superCommandBuffer, alsoResetPrimaries, |
| 7284 | alsoClearPendingDescriptorSets); |
| 7285 | }); |
| 7286 | eraseObjects(&cb->superObjects); |
| 7287 | } |
Lingfeng Yang | 7efc857 | 2021-07-13 16:30:10 -0700 | [diff] [blame] | 7288 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7289 | forAllObjects(cb->subObjects, [cb](void* obj) { |
| 7290 | VkCommandBuffer subCommandBuffer = (VkCommandBuffer)obj; |
| 7291 | struct goldfish_VkCommandBuffer* subCb = as_goldfish_VkCommandBuffer(subCommandBuffer); |
| 7292 | // We don't do resetCommandBufferStagingInfo(subCommandBuffer) |
| 7293 | // since the user still might have submittable stuff pending there. |
| 7294 | eraseObject(&subCb->superObjects, (void*)cb); |
| 7295 | }); |
Lingfeng Yang | 7efc857 | 2021-07-13 16:30:10 -0700 | [diff] [blame] | 7296 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7297 | eraseObjects(&cb->subObjects); |
| 7298 | } |
Lingfeng Yang | 7efc857 | 2021-07-13 16:30:10 -0700 | [diff] [blame] | 7299 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7300 | // Unlike resetCommandBufferStagingInfo, this does not always erase its |
| 7301 | // superObjects pointers because the command buffer has merely been |
| 7302 | // submitted, not reset. However, if the command buffer was recorded with |
| 7303 | // ONE_TIME_SUBMIT_BIT, then it will also reset its primaries. |
| 7304 | // |
| 7305 | // Also, we save the set of descriptor sets referenced by this command |
| 7306 | // buffer because we only submitted the command buffer and it's possible to |
| 7307 | // update the descriptor set again and re-submit the same command without |
| 7308 | // recording it (Update-after-bind descriptor sets) |
| 7309 | void ResourceTracker::resetCommandBufferPendingTopology(VkCommandBuffer commandBuffer) { |
| 7310 | struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer); |
| 7311 | if (cb->flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) { |
| 7312 | resetCommandBufferStagingInfo(commandBuffer, true /* reset primaries */, |
| 7313 | true /* clear pending descriptor sets */); |
| 7314 | } else { |
| 7315 | resetCommandBufferStagingInfo(commandBuffer, false /* Don't reset primaries */, |
| 7316 | false /* Don't clear pending descriptor sets */); |
| 7317 | } |
| 7318 | } |
| 7319 | |
| 7320 | void ResourceTracker::resetCommandPoolStagingInfo(VkCommandPool commandPool) { |
| 7321 | struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool); |
| 7322 | |
| 7323 | if (!p) return; |
| 7324 | |
| 7325 | forAllObjects(p->subObjects, [this](void* commandBuffer) { |
| 7326 | this->resetCommandBufferStagingInfo((VkCommandBuffer)commandBuffer, |
| 7327 | true /* also reset primaries */, |
| 7328 | true /* also clear pending descriptor sets */); |
| 7329 | }); |
| 7330 | } |
| 7331 | |
| 7332 | void ResourceTracker::addToCommandPool(VkCommandPool commandPool, uint32_t commandBufferCount, |
| 7333 | VkCommandBuffer* pCommandBuffers) { |
| 7334 | for (uint32_t i = 0; i < commandBufferCount; ++i) { |
| 7335 | struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool); |
| 7336 | struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(pCommandBuffers[i]); |
| 7337 | appendObject(&p->subObjects, (void*)(pCommandBuffers[i])); |
| 7338 | appendObject(&cb->poolObjects, (void*)commandPool); |
| 7339 | } |
| 7340 | } |
| 7341 | |
| 7342 | void ResourceTracker::clearCommandPool(VkCommandPool commandPool) { |
| 7343 | resetCommandPoolStagingInfo(commandPool); |
| 7344 | struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool); |
| 7345 | forAllObjects(p->subObjects, [this](void* commandBuffer) { |
| 7346 | this->unregister_VkCommandBuffer((VkCommandBuffer)commandBuffer); |
| 7347 | }); |
| 7348 | eraseObjects(&p->subObjects); |
| 7349 | } |
| 7350 | |
// Lazily queries and caches the physical-device memory properties; after the
// first call the cached copy is returned without a host round trip.
// `context` is the VkEncoder used to reach the host. If `physicalDevice` is
// VK_NULL_HANDLE, it is derived from the tracked info for `device`.
// NOTE(review): the cache check is done without holding mLock, so two threads
// racing on the first call could both issue the host query — confirm callers
// serialize the first use, or guard the whole function with mLock.
const VkPhysicalDeviceMemoryProperties& ResourceTracker::getPhysicalDeviceMemoryProperties(
    void* context, VkDevice device, VkPhysicalDevice physicalDevice) {
    if (!mCachedPhysicalDeviceMemoryProps) {
        if (physicalDevice == VK_NULL_HANDLE) {
            // No physical device supplied: derive it from the device info.
            AutoLock<RecursiveLock> lock(mLock);

            auto deviceInfoIt = info_VkDevice.find(device);
            if (deviceInfoIt == info_VkDevice.end()) {
                // Neither handle is usable; this is a caller bug.
                mesa_loge("Failed to pass device or physical device.");
                abort();
            }
            const auto& deviceInfo = deviceInfoIt->second;
            physicalDevice = deviceInfo.physdev;
        }

        VkEncoder* enc = (VkEncoder*)context;

        VkPhysicalDeviceMemoryProperties properties;
        // `true` is the do-lock argument, matching the other encoder calls
        // in this file (e.g. vkCreateGraphicsPipelines above).
        enc->vkGetPhysicalDeviceMemoryProperties(physicalDevice, &properties, true /* do lock */);

        mCachedPhysicalDeviceMemoryProps.emplace(std::move(properties));
    }
    return *mCachedPhysicalDeviceMemoryProps;
}
Yahan Zhou | 483ac02 | 2022-06-13 15:41:11 -0700 | [diff] [blame] | 7375 | |
Lingfeng Yang | 71b596b | 2018-11-07 18:03:25 -0800 | [diff] [blame] | 7376 | static ResourceTracker* sTracker = nullptr; |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7377 | |
// Allocates the handle-mapping helpers exposed via createMapping() and
// destroyMapping(); released in the destructor.
ResourceTracker::ResourceTracker() {
    mCreateMapping = new CreateMapping();
    mDestroyMapping = new DestroyMapping();
}
| 7383 | |
| 7384 | ResourceTracker::~ResourceTracker() { |
| 7385 | delete mCreateMapping; |
| 7386 | delete mDestroyMapping; |
| 7387 | } |
| 7388 | |
| 7389 | VulkanHandleMapping* ResourceTracker::createMapping() { return mCreateMapping; } |
| 7390 | |
| 7391 | VulkanHandleMapping* ResourceTracker::destroyMapping() { return mDestroyMapping; } |
| 7392 | |
Lingfeng Yang | 71b596b | 2018-11-07 18:03:25 -0800 | [diff] [blame] | 7393 | // static |
| 7394 | ResourceTracker* ResourceTracker::get() { |
| 7395 | if (!sTracker) { |
| 7396 | // To be initialized once on vulkan device open. |
| 7397 | sTracker = new ResourceTracker; |
| 7398 | } |
| 7399 | return sTracker; |
| 7400 | } |
Lingfeng Yang | 71b596b | 2018-11-07 18:03:25 -0800 | [diff] [blame] | 7401 | |
Lingfeng Yang | 967f9af | 2021-01-22 17:56:24 -0800 | [diff] [blame] | 7402 | // static |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 7403 | ALWAYS_INLINE_GFXSTREAM VkEncoder* ResourceTracker::getCommandBufferEncoder( |
| 7404 | VkCommandBuffer commandBuffer) { |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7405 | if (!(ResourceTracker::streamFeatureBits & |
| 7406 | VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT)) { |
Lingfeng Yang | 967f9af | 2021-01-22 17:56:24 -0800 | [diff] [blame] | 7407 | auto enc = ResourceTracker::getThreadLocalEncoder(); |
| 7408 | ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, enc); |
| 7409 | return enc; |
| 7410 | } |
| 7411 | |
| 7412 | struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer); |
| 7413 | if (!cb->privateEncoder) { |
Shalini S | db704c9 | 2023-01-27 21:35:33 +0000 | [diff] [blame] | 7414 | sStaging.setAllocFree(ResourceTracker::get()->getAlloc(), |
| 7415 | ResourceTracker::get()->getFree()); |
Lingfeng Yang | 967f9af | 2021-01-22 17:56:24 -0800 | [diff] [blame] | 7416 | sStaging.popStaging((CommandBufferStagingStream**)&cb->privateStream, &cb->privateEncoder); |
| 7417 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7418 | uint8_t* writtenPtr; |
| 7419 | size_t written; |
Lingfeng Yang | 967f9af | 2021-01-22 17:56:24 -0800 | [diff] [blame] | 7420 | ((CommandBufferStagingStream*)cb->privateStream)->getWritten(&writtenPtr, &written); |
| 7421 | return cb->privateEncoder; |
| 7422 | } |
| 7423 | |
| 7424 | // static |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 7425 | ALWAYS_INLINE_GFXSTREAM VkEncoder* ResourceTracker::getQueueEncoder(VkQueue queue) { |
Lingfeng Yang | 967f9af | 2021-01-22 17:56:24 -0800 | [diff] [blame] | 7426 | auto enc = ResourceTracker::getThreadLocalEncoder(); |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7427 | if (!(ResourceTracker::streamFeatureBits & |
| 7428 | VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT)) { |
Lingfeng Yang | 967f9af | 2021-01-22 17:56:24 -0800 | [diff] [blame] | 7429 | ResourceTracker::get()->syncEncodersForQueue(queue, enc); |
| 7430 | } |
| 7431 | return enc; |
| 7432 | } |
| 7433 | |
| 7434 | // static |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 7435 | ALWAYS_INLINE_GFXSTREAM VkEncoder* ResourceTracker::getThreadLocalEncoder() { |
Lingfeng Yang | 967f9af | 2021-01-22 17:56:24 -0800 | [diff] [blame] | 7436 | auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc(); |
| 7437 | auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn); |
| 7438 | return vkEncoder; |
| 7439 | } |
Lingfeng Yang | f0654ff | 2019-02-02 12:21:24 -0800 | [diff] [blame] | 7440 | |
Lingfeng Yang | db61655 | 2021-01-22 17:58:02 -0800 | [diff] [blame] | 7441 | // static |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7442 | void ResourceTracker::setSeqnoPtr(uint32_t* seqnoptr) { sSeqnoPtr = seqnoptr; } |
Lingfeng Yang | db61655 | 2021-01-22 17:58:02 -0800 | [diff] [blame] | 7443 | |
| 7444 | // static |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 7445 | ALWAYS_INLINE_GFXSTREAM uint32_t ResourceTracker::nextSeqno() { |
Lingfeng Yang | db61655 | 2021-01-22 17:58:02 -0800 | [diff] [blame] | 7446 | uint32_t res = __atomic_add_fetch(sSeqnoPtr, 1, __ATOMIC_SEQ_CST); |
| 7447 | return res; |
| 7448 | } |
| 7449 | |
| 7450 | // static |
Gurchetan Singh | c4444b8 | 2023-09-19 08:06:20 -0700 | [diff] [blame] | 7451 | ALWAYS_INLINE_GFXSTREAM uint32_t ResourceTracker::getSeqno() { |
Lingfeng Yang | db61655 | 2021-01-22 17:58:02 -0800 | [diff] [blame] | 7452 | uint32_t res = __atomic_load_n(sSeqnoPtr, __ATOMIC_SEQ_CST); |
| 7453 | return res; |
| 7454 | } |
| 7455 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7456 | void ResourceTracker::transformImpl_VkExternalMemoryProperties_tohost(VkExternalMemoryProperties*, |
| 7457 | uint32_t) {} |
Lingfeng Yang | 154a33c | 2019-01-29 19:06:23 -0800 | [diff] [blame] | 7458 | |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7459 | void ResourceTracker::transformImpl_VkImageCreateInfo_fromhost(const VkImageCreateInfo*, uint32_t) { |
Lingfeng Yang | def88ba | 2018-12-13 12:43:17 -0800 | [diff] [blame] | 7460 | } |
Gurchetan Singh | fa4dfda | 2023-09-18 17:11:24 -0700 | [diff] [blame] | 7461 | void ResourceTracker::transformImpl_VkImageCreateInfo_tohost(const VkImageCreateInfo*, uint32_t) {} |
Kaiyi Li | fb9dd35 | 2021-12-09 22:08:51 +0000 | [diff] [blame] | 7462 | |
Yilong Li | dbc16d5 | 2021-02-04 03:15:21 -0800 | [diff] [blame] | 7463 | #define DEFINE_TRANSFORMED_TYPE_IMPL(type) \ |
| 7464 | void ResourceTracker::transformImpl_##type##_tohost(type*, uint32_t) {} \ |
| 7465 | void ResourceTracker::transformImpl_##type##_fromhost(type*, uint32_t) {} |
Lingfeng Yang | 2b1b8cf | 2019-02-08 09:53:36 -0800 | [diff] [blame] | 7466 | |
Yilong Li | 52ed69b | 2021-02-05 01:47:32 -0800 | [diff] [blame] | 7467 | LIST_TRIVIAL_TRANSFORMED_TYPES(DEFINE_TRANSFORMED_TYPE_IMPL) |
Lingfeng Yang | 2b1b8cf | 2019-02-08 09:53:36 -0800 | [diff] [blame] | 7468 | |
Jason Macnak | 3d66400 | 2023-03-30 16:00:50 -0700 | [diff] [blame] | 7469 | } // namespace vk |
| 7470 | } // namespace gfxstream |