Flutter Impeller
context_vk.cc
Go to the documentation of this file.
1 // Copyright 2013 The Flutter Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
6 #include <thread>
7 #include <unordered_map>
8 
9 #include "fml/concurrent_message_loop.h"
10 #include "impeller/core/formats.h"
17 
18 #ifdef FML_OS_ANDROID
19 #include <pthread.h>
20 #include <sys/resource.h>
21 #include <sys/time.h>
22 #endif // FML_OS_ANDROID
23 
24 #include <map>
25 #include <memory>
26 #include <optional>
27 #include <string>
28 #include <vector>
29 
30 #include "flutter/fml/cpu_affinity.h"
31 #include "flutter/fml/trace_event.h"
46 
47 VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE
48 
49 namespace impeller {
50 
// File-local flag recording whether validation layers were enabled for this
// process. Written once in ContextVK::Setup() from
// CapabilitiesVK::AreValidationsEnabled(); exposed via HasValidationLayers().
static bool gHasValidationLayers = false;
52 
54  return gHasValidationLayers;
55 }
56 
57 static std::optional<vk::PhysicalDevice> PickPhysicalDevice(
58  const CapabilitiesVK& caps,
59  const vk::Instance& instance) {
60  for (const auto& device : instance.enumeratePhysicalDevices().value) {
61  if (caps.GetEnabledDeviceFeatures(device).has_value()) {
62  return device;
63  }
64  }
65  return std::nullopt;
66 }
67 
68 static std::vector<vk::DeviceQueueCreateInfo> GetQueueCreateInfos(
69  std::initializer_list<QueueIndexVK> queues) {
70  std::map<size_t /* family */, size_t /* index */> family_index_map;
71  for (const auto& queue : queues) {
72  family_index_map[queue.family] = 0;
73  }
74  for (const auto& queue : queues) {
75  auto value = family_index_map[queue.family];
76  family_index_map[queue.family] = std::max(value, queue.index);
77  }
78 
79  static float kQueuePriority = 1.0f;
80  std::vector<vk::DeviceQueueCreateInfo> infos;
81  for (const auto& item : family_index_map) {
82  vk::DeviceQueueCreateInfo info;
83  info.setQueueFamilyIndex(item.first);
84  info.setQueueCount(item.second + 1);
85  info.setQueuePriorities(kQueuePriority);
86  infos.push_back(info);
87  }
88  return infos;
89 }
90 
91 static std::optional<QueueIndexVK> PickQueue(const vk::PhysicalDevice& device,
92  vk::QueueFlagBits flags) {
93  // This can be modified to ensure that dedicated queues are returned for each
94  // queue type depending on support.
95  const auto families = device.getQueueFamilyProperties();
96  for (size_t i = 0u; i < families.size(); i++) {
97  if (!(families[i].queueFlags & flags)) {
98  continue;
99  }
100  return QueueIndexVK{.family = i, .index = 0};
101  }
102  return std::nullopt;
103 }
104 
105 std::shared_ptr<ContextVK> ContextVK::Create(Settings settings) {
106  auto context = std::shared_ptr<ContextVK>(new ContextVK(settings.flags));
107  context->Setup(std::move(settings));
108  if (!context->IsValid()) {
109  return nullptr;
110  }
111  return context;
112 }
113 
114 // static
115 size_t ContextVK::ChooseThreadCountForWorkers(size_t hardware_concurrency) {
116  // Never create more than 4 worker threads. Attempt to use up to
117  // half of the available concurrency.
118  return std::clamp(hardware_concurrency / 2ull, /*lo=*/1ull, /*hi=*/4ull);
119 }
120 
namespace {
// Monotonically increasing counter used to hand each context a unique id.
std::atomic_uint64_t context_count = 0;

// Despite the name, this does not hash the pointer: the argument is unused
// and the "hash" is simply the next value of the global counter.
uint64_t CalculateHash(void* ptr) {
  return context_count++;
}
}  // namespace
127 
// Assigns the context a unique id via the file-local counter; the pointer
// argument to CalculateHash is unused. Real initialization happens in Setup().
ContextVK::ContextVK(const Flags& flags)
    : Context(flags), hash_(CalculateHash(this)) {}
130 
132  if (device_holder_ && device_holder_->device) {
133  [[maybe_unused]] auto result = device_holder_->device->waitIdle();
134  }
135  if (command_pool_recycler_) {
136  command_pool_recycler_->DestroyThreadLocalPools();
137  }
138 }
139 
142 }
143 
// Performs all Vulkan bring-up for the context: dispatcher init, instance,
// debug report, physical/logical device, queues, allocator, and the various
// libraries/recyclers. On any failure it logs and returns early, leaving
// is_valid_ false so Create() discards the context. When
// settings.embedder_data is set, the embedder-supplied instance/device/queue
// are adopted instead of being created here.
void ContextVK::Setup(Settings settings) {
  TRACE_EVENT0("impeller", "ContextVK::Setup");

  if (!settings.proc_address_callback) {
    VALIDATION_LOG << "Missing proc address callback.";
    return;
  }

  // Concurrent worker pool used for background work (e.g. pipeline creation).
  raster_message_loop_ = fml::ConcurrentMessageLoop::Create(
      ChooseThreadCountForWorkers(std::thread::hardware_concurrency()));

  // Bootstrap the dynamic dispatcher with just the loader entrypoint; it is
  // re-initialized below once the instance exists.
  auto& dispatcher = VULKAN_HPP_DEFAULT_DISPATCHER;
  dispatcher.init(settings.proc_address_callback);

  std::vector<std::string> embedder_instance_extensions;
  std::vector<std::string> embedder_device_extensions;
  if (settings.embedder_data.has_value()) {
    embedder_instance_extensions = settings.embedder_data->instance_extensions;
    embedder_device_extensions = settings.embedder_data->device_extensions;
  }
  auto caps = std::shared_ptr<CapabilitiesVK>(new CapabilitiesVK(
      settings.enable_validation,                                      //
      settings.fatal_missing_validations,                              //
      /*use_embedder_extensions=*/settings.embedder_data.has_value(),  //
      embedder_instance_extensions,                                    //
      embedder_device_extensions                                       //
      ));

  if (!caps->IsValid()) {
    VALIDATION_LOG << "Could not determine device capabilities.";
    return;
  }

  gHasValidationLayers = caps->AreValidationsEnabled();

  auto enabled_layers = caps->GetEnabledLayers();
  auto enabled_extensions = caps->GetEnabledInstanceExtensions();

  if (!enabled_layers.has_value() || !enabled_extensions.has_value()) {
    VALIDATION_LOG << "Device has insufficient capabilities.";
    return;
  }

  vk::InstanceCreateFlags instance_flags = {};

  // Portability enumeration (e.g. MoltenVK) requires an explicit opt-in flag
  // when its extension is enabled.
  if (std::find(enabled_extensions.value().begin(),
                enabled_extensions.value().end(),
                "VK_KHR_portability_enumeration") !=
      enabled_extensions.value().end()) {
    instance_flags |= vk::InstanceCreateFlagBits::eEnumeratePortabilityKHR;
  }

  // The C API needs const char* arrays; these borrow from the strings above,
  // which stay alive until instance creation below.
  std::vector<const char*> enabled_layers_c;
  std::vector<const char*> enabled_extensions_c;

  for (const auto& layer : enabled_layers.value()) {
    enabled_layers_c.push_back(layer.c_str());
  }

  for (const auto& ext : enabled_extensions.value()) {
    enabled_extensions_c.push_back(ext.c_str());
  }

  vk::ApplicationInfo application_info;

  // Use the same encoding macro as vulkan versions, but otherwise application
  // version is intended to be the version of the Impeller engine. This version
  // information, along with the application name below is provided to allow
  // IHVs to make optimizations and/or disable functionality based on knowledge
  // of the engine version (for example, to work around bugs). We don't tie this
  // to the overall Flutter version as that version is not yet defined when the
  // engine is compiled. Instead we can manually bump it occasionally.
  //
  // variant, major, minor, patch
  application_info.setApplicationVersion(
      VK_MAKE_API_VERSION(0, 2, 0, 0) /*version 2.0.0*/);
  application_info.setApiVersion(VK_API_VERSION_1_1);
  application_info.setEngineVersion(VK_API_VERSION_1_0);
  application_info.setPEngineName("Impeller");
  application_info.setPApplicationName("Impeller");

  vk::StructureChain<vk::InstanceCreateInfo, vk::ValidationFeaturesEXT>
      instance_chain;

  if (!caps->AreValidationsEnabled()) {
    instance_chain.unlink<vk::ValidationFeaturesEXT>();
  }

  std::vector<vk::ValidationFeatureEnableEXT> enabled_validations = {
      vk::ValidationFeatureEnableEXT::eSynchronizationValidation,
  };

  // NOTE(review): vulkan.hpp's StructureChain::get returns a reference, but
  // `auto` copies here — the enabled validation features appear to be set on
  // a detached copy rather than the struct linked into the chain. Confirm
  // whether these should be `auto&`.
  auto validation = instance_chain.get<vk::ValidationFeaturesEXT>();
  validation.setEnabledValidationFeatures(enabled_validations);

  auto instance_info = instance_chain.get<vk::InstanceCreateInfo>();
  instance_info.setPEnabledLayerNames(enabled_layers_c);
  instance_info.setPEnabledExtensionNames(enabled_extensions_c);
  instance_info.setPApplicationInfo(&application_info);
  instance_info.setFlags(instance_flags);

  auto device_holder = std::make_shared<DeviceHolderImpl>();
  if (!settings.embedder_data.has_value()) {
    auto instance = vk::createInstanceUnique(instance_info);
    if (instance.result != vk::Result::eSuccess) {
      VALIDATION_LOG << "Could not create Vulkan instance: "
                     << vk::to_string(instance.result);
      return;
    }
    device_holder->instance = std::move(instance.value);
  } else {
    // Adopt the embedder's instance without taking ownership of it.
    device_holder->instance.reset(settings.embedder_data->instance);
    device_holder->owned = false;
  }
  // Re-init the dispatcher now that instance-level entrypoints are loadable.
  dispatcher.init(device_holder->instance.get());

  //----------------------------------------------------------------------------
  /// Setup the debug report.
  ///
  /// Do this as early as possible since we could use the debug report from
  /// initialization issues.
  ///
  auto debug_report =
      std::make_unique<DebugReportVK>(*caps, device_holder->instance.get());

  if (!debug_report->IsValid()) {
    VALIDATION_LOG << "Could not set up debug report.";
    return;
  }

  //----------------------------------------------------------------------------
  /// Pick the physical device.
  ///
  if (!settings.embedder_data.has_value()) {
    auto physical_device =
        PickPhysicalDevice(*caps, device_holder->instance.get());
    if (!physical_device.has_value()) {
      VALIDATION_LOG << "No valid Vulkan device found.";
      return;
    }
    device_holder->physical_device = physical_device.value();
  } else {
    device_holder->physical_device = settings.embedder_data->physical_device;
  }

  //----------------------------------------------------------------------------
  /// Pick device queues.
  ///
  auto graphics_queue =
      PickQueue(device_holder->physical_device, vk::QueueFlagBits::eGraphics);
  auto transfer_queue =
      PickQueue(device_holder->physical_device, vk::QueueFlagBits::eTransfer);
  auto compute_queue =
      PickQueue(device_holder->physical_device, vk::QueueFlagBits::eCompute);

  if (!graphics_queue.has_value()) {
    VALIDATION_LOG << "Could not pick graphics queue.";
    return;
  }
  // A dedicated transfer queue is optional; fall back to the graphics queue.
  if (!transfer_queue.has_value()) {
    transfer_queue = graphics_queue.value();
  }
  if (!compute_queue.has_value()) {
    VALIDATION_LOG << "Could not pick compute queue.";
    return;
  }

  //----------------------------------------------------------------------------
  /// Create the logical device.
  ///
  auto enabled_device_extensions =
      caps->GetEnabledDeviceExtensions(device_holder->physical_device);
  if (!enabled_device_extensions.has_value()) {
    // This shouldn't happen since we already did device selection. But
    // doesn't hurt to check again.
    return;
  }

  std::vector<const char*> enabled_device_extensions_c;
  for (const auto& ext : enabled_device_extensions.value()) {
    enabled_device_extensions_c.push_back(ext.c_str());
  }

  const auto queue_create_infos = GetQueueCreateInfos(
      {graphics_queue.value(), compute_queue.value(), transfer_queue.value()});

  const auto enabled_features =
      caps->GetEnabledDeviceFeatures(device_holder->physical_device);
  if (!enabled_features.has_value()) {
    // This shouldn't happen since the device can't be picked if this was not
    // true. But doesn't hurt to check.
    return;
  }

  vk::DeviceCreateInfo device_info;

  // The features are chained onto the create info via pNext.
  device_info.setPNext(&enabled_features.value().get());
  device_info.setQueueCreateInfos(queue_create_infos);
  device_info.setPEnabledExtensionNames(enabled_device_extensions_c);
  // Device layers are deprecated and ignored.

  if (!settings.embedder_data.has_value()) {
    auto device_result =
        device_holder->physical_device.createDeviceUnique(device_info);
    if (device_result.result != vk::Result::eSuccess) {
      VALIDATION_LOG << "Could not create logical device.";
      return;
    }
    device_holder->device = std::move(device_result.value);
  } else {
    device_holder->device.reset(settings.embedder_data->device);
  }

  if (!caps->SetPhysicalDevice(device_holder->physical_device,
                               *enabled_features)) {
    VALIDATION_LOG << "Capabilities could not be updated.";
    return;
  }

  //----------------------------------------------------------------------------
  /// Create the allocator.
  ///
  auto allocator = std::shared_ptr<AllocatorVK>(new AllocatorVK(
      weak_from_this(),                //
      application_info.apiVersion,     //
      device_holder->physical_device,  //
      device_holder,                   //
      device_holder->instance.get(),   //
      *caps                            //
      ));

  if (!allocator->IsValid()) {
    VALIDATION_LOG << "Could not create memory allocator.";
    return;
  }

  //----------------------------------------------------------------------------
  /// Setup the pipeline library.
  ///
  auto pipeline_library = std::shared_ptr<PipelineLibraryVK>(
      new PipelineLibraryVK(device_holder,                         //
                            caps,                                  //
                            std::move(settings.cache_directory),   //
                            raster_message_loop_->GetTaskRunner()  //
                            ));

  if (!pipeline_library->IsValid()) {
    VALIDATION_LOG << "Could not create pipeline library.";
    return;
  }

  auto sampler_library =
      std::shared_ptr<SamplerLibraryVK>(new SamplerLibraryVK(device_holder));

  auto shader_library = std::shared_ptr<ShaderLibraryVK>(
      new ShaderLibraryVK(device_holder,                   //
                          settings.shader_libraries_data)  //
  );

  if (!shader_library->IsValid()) {
    VALIDATION_LOG << "Could not create shader library.";
    return;
  }

  //----------------------------------------------------------------------------
  /// Create the fence waiter.
  ///
  auto fence_waiter =
      std::shared_ptr<FenceWaiterVK>(new FenceWaiterVK(device_holder));

  //----------------------------------------------------------------------------
  /// Create the resource manager and command pool recycler.
  ///
  auto resource_manager = ResourceManagerVK::Create();
  if (!resource_manager) {
    VALIDATION_LOG << "Could not create resource manager.";
    return;
  }

  auto command_pool_recycler =
      std::make_shared<CommandPoolRecyclerVK>(shared_from_this());
  if (!command_pool_recycler) {
    VALIDATION_LOG << "Could not create command pool recycler.";
    return;
  }

  auto descriptor_pool_recycler =
      std::make_shared<DescriptorPoolRecyclerVK>(weak_from_this());
  if (!descriptor_pool_recycler) {
    VALIDATION_LOG << "Could not create descriptor pool recycler.";
    return;
  }

  //----------------------------------------------------------------------------
  /// Fetch the queues.
  ///
  QueuesVK queues;
  if (!settings.embedder_data.has_value()) {
    queues = QueuesVK::FromQueueIndices(device_holder->device.get(),  //
                                        graphics_queue.value(),       //
                                        compute_queue.value(),        //
                                        transfer_queue.value()        //
    );
  } else {
    queues =
        QueuesVK::FromEmbedderQueue(settings.embedder_data->queue,
                                    settings.embedder_data->queue_family_index);
  }
  if (!queues.IsValid()) {
    VALIDATION_LOG << "Could not fetch device queues.";
    return;
  }

  // Used below for the reported device name.
  VkPhysicalDeviceProperties physical_device_properties;
  dispatcher.vkGetPhysicalDeviceProperties(device_holder->physical_device,
                                           &physical_device_properties);

  //----------------------------------------------------------------------------
  /// All done!
  ///

  // Apply workarounds for broken drivers.
  auto driver_info =
      std::make_unique<DriverInfoVK>(device_holder->physical_device);
  workarounds_ = GetWorkaroundsFromDriverInfo(*driver_info);
  caps->ApplyWorkarounds(workarounds_);
  sampler_library->ApplyWorkarounds(workarounds_);

  // Commit everything to members only after all fallible steps succeeded.
  device_holder_ = std::move(device_holder);
  idle_waiter_vk_ = std::make_shared<IdleWaiterVK>(device_holder_);
  driver_info_ = std::move(driver_info);
  debug_report_ = std::move(debug_report);
  allocator_ = std::move(allocator);
  shader_library_ = std::move(shader_library);
  sampler_library_ = std::move(sampler_library);
  pipeline_library_ = std::move(pipeline_library);
  yuv_conversion_library_ = std::shared_ptr<YUVConversionLibraryVK>(
      new YUVConversionLibraryVK(device_holder_));
  queues_ = std::move(queues);
  device_capabilities_ = std::move(caps);
  fence_waiter_ = std::move(fence_waiter);
  resource_manager_ = std::move(resource_manager);
  command_pool_recycler_ = std::move(command_pool_recycler);
  descriptor_pool_recycler_ = std::move(descriptor_pool_recycler);
  device_name_ = std::string(physical_device_properties.deviceName);
  command_queue_vk_ = std::make_shared<CommandQueueVK>(weak_from_this());
  should_enable_surface_control_ = settings.enable_surface_control;
  should_batch_cmd_buffers_ = !workarounds_.batch_submit_command_buffer_timeout;
  is_valid_ = true;

  // Create the GPU Tracer later because it depends on state from
  // the ContextVK.
  gpu_tracer_ = std::make_shared<GPUTracerVK>(weak_from_this(),
                                              settings.enable_gpu_tracing);
  gpu_tracer_->InitializeQueryPool(*this);

  //----------------------------------------------------------------------------
  /// Label all the relevant objects. This happens after setup so that the
  /// debug messengers have had a chance to be set up.
  ///
  SetDebugName(GetDevice(), device_holder_->device.get(), "ImpellerDevice");
}
506 
508  CapabilitiesVK::Cast(*device_capabilities_).SetOffscreenFormat(pixel_format);
509 }
510 
// |Context|
std::string ContextVK::DescribeGpuModel() const {
  // Device name captured from VkPhysicalDeviceProperties during Setup().
  return device_name_;
}
515 
// |Context|
// True only when Setup() ran to completion without an early return.
bool ContextVK::IsValid() const {
  return is_valid_;
}
519 
// |Context|
// Allocator created during Setup(); used for all texture/buffer allocations.
std::shared_ptr<Allocator> ContextVK::GetResourceAllocator() const {
  return allocator_;
}
523 
// |Context|
std::shared_ptr<ShaderLibrary> ContextVK::GetShaderLibrary() const {
  return shader_library_;
}
527 
// |Context|
std::shared_ptr<SamplerLibrary> ContextVK::GetSamplerLibrary() const {
  return sampler_library_;
}
531 
// |Context|
std::shared_ptr<PipelineLibrary> ContextVK::GetPipelineLibrary() const {
  return pipeline_library_;
}
535 
// |Context|
// Creates a command buffer backed by this thread's recycled command pool and
// a per-thread cached descriptor pool. The returned buffer has already had
// vkBeginCommandBuffer called on it (one-time-submit). Returns nullptr if any
// of the backing objects cannot be obtained.
std::shared_ptr<CommandBuffer> ContextVK::CreateCommandBuffer() const {
  const auto& recycler = GetCommandPoolRecycler();
  // Thread-local command pool; one per calling thread.
  auto tls_pool = recycler->Get();
  if (!tls_pool) {
    return nullptr;
  }

  // look up a cached descriptor pool for the current frame and reuse it
  // if it exists, otherwise create a new pool.
  std::shared_ptr<DescriptorPoolVK> descriptor_pool;
  {
    // The cache map is shared across threads and keyed by thread id, so the
    // lookup/insert must happen under the mutex.
    Lock lock(desc_pool_mutex_);
    DescriptorPoolMap::iterator current_pool =
        cached_descriptor_pool_.find(std::this_thread::get_id());
    if (current_pool == cached_descriptor_pool_.end()) {
      descriptor_pool = (cached_descriptor_pool_[std::this_thread::get_id()] =
                             descriptor_pool_recycler_->GetDescriptorPool());
    } else {
      descriptor_pool = current_pool->second;
    }
  }

  // Bundle the pools and a GPU probe so their lifetimes track the command
  // buffer's.
  auto tracked_objects = std::make_shared<TrackedObjectsVK>(
      weak_from_this(), std::move(tls_pool), std::move(descriptor_pool),
      GetGPUTracer()->CreateGPUProbe());
  auto queue = GetGraphicsQueue();

  if (!tracked_objects || !tracked_objects->IsValid() || !queue) {
    return nullptr;
  }

  // Begin recording immediately; callers receive a ready-to-encode buffer.
  vk::CommandBufferBeginInfo begin_info;
  begin_info.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit;
  if (tracked_objects->GetCommandBuffer().begin(begin_info) !=
      vk::Result::eSuccess) {
    VALIDATION_LOG << "Could not begin command buffer.";
    return nullptr;
  }

  tracked_objects->GetGPUProbe().RecordCmdBufferStart(
      tracked_objects->GetCommandBuffer());

  return std::shared_ptr<CommandBufferVK>(new CommandBufferVK(
      shared_from_this(),         //
      GetDeviceHolder(),          //
      std::move(tracked_objects)  //
      ));
}
584 
// Returns the (owned or embedder-adopted) Vulkan instance handle.
vk::Instance ContextVK::GetInstance() const {
  return *device_holder_->instance;
}
588 
// Returns the logical device handle created (or adopted) in Setup().
const vk::Device& ContextVK::GetDevice() const {
  return device_holder_->device.get();
}
592 
593 const std::shared_ptr<fml::ConcurrentTaskRunner>
595  return raster_message_loop_->GetTaskRunner();
596 }
597 
599  // There are multiple objects, for example |CommandPoolVK|, that in their
600  // destructors make a strong reference to |ContextVK|. Resetting these shared
601  // pointers ensures that cleanup happens in a correct order.
602  //
603  // tl;dr: Without it, we get thread::join failures on shutdown.
604  fence_waiter_->Terminate();
605  resource_manager_.reset();
606 
607  raster_message_loop_->Terminate();
608 }
609 
// Creates a new surface context that shares ownership of this context.
std::shared_ptr<SurfaceContextVK> ContextVK::CreateSurfaceContext() {
  return std::make_shared<SurfaceContextVK>(shared_from_this());
}
613 
// |Context|
const std::shared_ptr<const Capabilities>& ContextVK::GetCapabilities() const {
  return device_capabilities_;
}
617 
// Graphics queue picked (or adopted from the embedder) during Setup().
const std::shared_ptr<QueueVK>& ContextVK::GetGraphicsQueue() const {
  return queues_.graphics_queue;
}
621 
vk::PhysicalDevice ContextVK::GetPhysicalDevice() const {
  return device_holder_->physical_device;
}
625 
std::shared_ptr<FenceWaiterVK> ContextVK::GetFenceWaiter() const {
  return fence_waiter_;
}
629 
std::shared_ptr<ResourceManagerVK> ContextVK::GetResourceManager() const {
  return resource_manager_;
}
633 
// Recycler backing the thread-local command pools used by
// CreateCommandBuffer().
std::shared_ptr<CommandPoolRecyclerVK> ContextVK::GetCommandPoolRecycler()
    const {
  return command_pool_recycler_;
}
638 
std::shared_ptr<GPUTracerVK> ContextVK::GetGPUTracer() const {
  return gpu_tracer_;
}
642 
std::shared_ptr<DescriptorPoolRecyclerVK> ContextVK::GetDescriptorPoolRecycler()
    const {
  return descriptor_pool_recycler_;
}
647 
// |Context|
std::shared_ptr<CommandQueue> ContextVK::GetCommandQueue() const {
  return command_queue_vk_;
}
651 
653  std::shared_ptr<CommandBuffer> command_buffer) {
654  if (should_batch_cmd_buffers_) {
655  pending_command_buffers_.push_back(std::move(command_buffer));
656  return true;
657  } else {
658  return GetCommandQueue()->Submit({command_buffer}).ok();
659  }
660 }
661 
663  if (pending_command_buffers_.empty()) {
664  return true;
665  }
666 
667  if (should_batch_cmd_buffers_) {
668  bool result = GetCommandQueue()->Submit(pending_command_buffers_).ok();
669  pending_command_buffers_.clear();
670  return result;
671  } else {
672  return true;
673  }
674 }
675 
676 // Creating a render pass is observed to take an additional 6ms on a Pixel 7
677 // device as the driver will lazily bootstrap and compile shaders to do so.
678 // The render pass does not need to be begun or executed.
681  RenderTarget render_target =
682  rt_allocator.CreateOffscreenMSAA(*this, {1, 1}, 1);
683 
684  RenderPassBuilderVK builder;
685 
686  render_target.IterateAllColorAttachments(
687  [&builder](size_t index, const ColorAttachment& attachment) -> bool {
688  builder.SetColorAttachment(
689  index, //
690  attachment.texture->GetTextureDescriptor().format, //
691  attachment.texture->GetTextureDescriptor().sample_count, //
692  attachment.load_action, //
693  attachment.store_action //
694  );
695  return true;
696  });
697 
698  if (auto depth = render_target.GetDepthAttachment(); depth.has_value()) {
700  depth->texture->GetTextureDescriptor().format, //
701  depth->texture->GetTextureDescriptor().sample_count, //
702  depth->load_action, //
703  depth->store_action //
704  );
705  } else if (auto stencil = render_target.GetStencilAttachment();
706  stencil.has_value()) {
707  builder.SetStencilAttachment(
708  stencil->texture->GetTextureDescriptor().format, //
709  stencil->texture->GetTextureDescriptor().sample_count, //
710  stencil->load_action, //
711  stencil->store_action //
712  );
713  }
714 
715  auto pass = builder.Build(GetDevice());
716 }
717 
719  {
720  Lock lock(desc_pool_mutex_);
721  cached_descriptor_pool_.erase(std::this_thread::get_id());
722  }
723  command_pool_recycler_->Dispose();
724 }
725 
726 const std::shared_ptr<YUVConversionLibraryVK>&
728  return yuv_conversion_library_;
729 }
730 
// Driver information gathered from the physical device during Setup().
const std::unique_ptr<DriverInfoVK>& ContextVK::GetDriverInfo() const {
  return driver_info_;
}
734 
736  return should_enable_surface_control_ &&
737  CapabilitiesVK::Cast(*device_capabilities_)
739 }
740 
743 }
744 
// Routes the onscreen command buffer through the same enqueue path as all
// other buffers (batched or submitted immediately depending on workarounds).
bool ContextVK::SubmitOnscreen(std::shared_ptr<CommandBuffer> cmd_buffer) {
  return EnqueueCommandBuffer(std::move(cmd_buffer));
}
748 
750  return workarounds_;
751 }
752 
753 } // namespace impeller
static CapabilitiesVK & Cast(Capabilities &base)
Definition: backend_cast.h:13
The Vulkan layers and extensions wrangler.
bool SupportsExternalSemaphoreExtensions() const
void SetOffscreenFormat(PixelFormat pixel_format) const
std::optional< PhysicalDeviceFeatures > GetEnabledDeviceFeatures(const vk::PhysicalDevice &physical_device) const
void SetOffscreenFormat(PixelFormat pixel_format)
Definition: context_vk.cc:507
std::shared_ptr< Allocator > GetResourceAllocator() const override
Returns the allocator used to create textures and buffers on the device.
Definition: context_vk.cc:520
std::shared_ptr< ResourceManagerVK > GetResourceManager() const
Definition: context_vk.cc:630
vk::PhysicalDevice GetPhysicalDevice() const
Definition: context_vk.cc:622
const std::shared_ptr< YUVConversionLibraryVK > & GetYUVConversionLibrary() const
Definition: context_vk.cc:727
bool SetDebugName(T handle, std::string_view label) const
Definition: context_vk.h:151
bool EnqueueCommandBuffer(std::shared_ptr< CommandBuffer > command_buffer) override
Enqueue command_buffer for submission by the end of the frame.
Definition: context_vk.cc:652
const vk::Device & GetDevice() const
Definition: context_vk.cc:589
bool FlushCommandBuffers() override
Flush all pending command buffers.
Definition: context_vk.cc:662
bool IsValid() const override
Determines if a context is valid. If the caller ever receives an invalid context, they must discard i...
Definition: context_vk.cc:516
const std::unique_ptr< DriverInfoVK > & GetDriverInfo() const
Definition: context_vk.cc:731
void DisposeThreadLocalCachedResources() override
Definition: context_vk.cc:718
std::shared_ptr< CommandBuffer > CreateCommandBuffer() const override
Create a new command buffer. Command buffers can be used to encode graphics, blit,...
Definition: context_vk.cc:536
virtual bool SubmitOnscreen(std::shared_ptr< CommandBuffer > cmd_buffer) override
Submit the command buffer that renders to the onscreen surface.
Definition: context_vk.cc:745
std::shared_ptr< SamplerLibrary > GetSamplerLibrary() const override
Returns the library of combined image samplers used in shaders.
Definition: context_vk.cc:528
static std::shared_ptr< ContextVK > Create(Settings settings)
Definition: context_vk.cc:105
std::shared_ptr< PipelineLibrary > GetPipelineLibrary() const override
Returns the library of pipelines used by render or compute commands.
Definition: context_vk.cc:532
const std::shared_ptr< QueueVK > & GetGraphicsQueue() const
Definition: context_vk.cc:618
const std::shared_ptr< const Capabilities > & GetCapabilities() const override
Get the capabilities of Impeller context. All optionally supported feature of the platform,...
Definition: context_vk.cc:614
RuntimeStageBackend GetRuntimeStageBackend() const override
Retrieve the runtime stage for this context type.
Definition: context_vk.cc:741
std::shared_ptr< CommandPoolRecyclerVK > GetCommandPoolRecycler() const
Definition: context_vk.cc:634
std::shared_ptr< CommandQueue > GetCommandQueue() const override
Return the graphics queue for submitting command buffers.
Definition: context_vk.cc:648
void InitializeCommonlyUsedShadersIfNeeded() const override
Definition: context_vk.cc:679
std::shared_ptr< FenceWaiterVK > GetFenceWaiter() const
Definition: context_vk.cc:626
bool GetShouldEnableSurfaceControlSwapchain() const
Whether the Android Surface control based swapchain should be enabled.
Definition: context_vk.cc:735
std::shared_ptr< GPUTracerVK > GetGPUTracer() const
Definition: context_vk.cc:639
BackendType GetBackendType() const override
Get the graphics backend of an Impeller context.
Definition: context_vk.cc:140
~ContextVK() override
Definition: context_vk.cc:131
std::string DescribeGpuModel() const override
Definition: context_vk.cc:512
const WorkaroundsVK & GetWorkarounds() const
Definition: context_vk.cc:749
const std::shared_ptr< fml::ConcurrentTaskRunner > GetConcurrentWorkerTaskRunner() const
Definition: context_vk.cc:594
static size_t ChooseThreadCountForWorkers(size_t hardware_concurrency)
Definition: context_vk.cc:115
std::shared_ptr< ShaderLibrary > GetShaderLibrary() const override
Returns the library of shaders used to specify the programmable stages of a pipeline.
Definition: context_vk.cc:524
vk::Instance GetInstance() const
Definition: context_vk.cc:585
std::shared_ptr< DeviceHolderVK > GetDeviceHolder() const
Definition: context_vk.h:191
void Shutdown() override
Force all pending asynchronous work to finish. This is achieved by deleting all owned concurrent mess...
Definition: context_vk.cc:598
std::shared_ptr< DescriptorPoolRecyclerVK > GetDescriptorPoolRecycler() const
Definition: context_vk.cc:643
std::shared_ptr< SurfaceContextVK > CreateSurfaceContext()
Definition: context_vk.cc:610
RenderPassBuilderVK & SetDepthStencilAttachment(PixelFormat format, SampleCount sample_count, LoadAction load_action, StoreAction store_action)
RenderPassBuilderVK & SetStencilAttachment(PixelFormat format, SampleCount sample_count, LoadAction load_action, StoreAction store_action)
RenderPassBuilderVK & SetColorAttachment(size_t index, PixelFormat format, SampleCount sample_count, LoadAction load_action, StoreAction store_action, vk::ImageLayout current_layout=vk::ImageLayout::eUndefined, bool is_swapchain=false)
vk::UniqueRenderPass Build(const vk::Device &device) const
a wrapper around the impeller [Allocator] instance that can be used to provide caching of allocated r...
virtual RenderTarget CreateOffscreenMSAA(const Context &context, ISize size, int mip_count, std::string_view label="Offscreen MSAA", RenderTarget::AttachmentConfigMSAA color_attachment_config=RenderTarget::kDefaultColorAttachmentConfigMSAA, std::optional< RenderTarget::AttachmentConfig > stencil_attachment_config=RenderTarget::kDefaultStencilAttachmentConfig, const std::shared_ptr< Texture > &existing_color_msaa_texture=nullptr, const std::shared_ptr< Texture > &existing_color_resolve_texture=nullptr, const std::shared_ptr< Texture > &existing_depth_stencil_texture=nullptr)
bool IterateAllColorAttachments(const std::function< bool(size_t index, const ColorAttachment &attachment)> &iterator) const
const std::optional< DepthAttachment > & GetDepthAttachment() const
const std::optional< StencilAttachment > & GetStencilAttachment() const
static std::shared_ptr< ResourceManagerVK > Create()
Creates a shared resource manager (a dedicated thread).
int32_t value
ScopedObject< Object > Create(CtorArgs &&... args)
Definition: object.h:161
bool HasValidationLayers()
Definition: context_vk.cc:53
static std::optional< QueueIndexVK > PickQueue(const vk::PhysicalDevice &device, vk::QueueFlagBits flags)
Definition: context_vk.cc:91
static std::optional< vk::PhysicalDevice > PickPhysicalDevice(const CapabilitiesVK &caps, const vk::Instance &instance)
Definition: context_vk.cc:57
PixelFormat
The Pixel formats supported by Impeller. The naming convention denotes the usage of the component,...
Definition: formats.h:99
static bool gHasValidationLayers
Definition: context_vk.cc:51
WorkaroundsVK GetWorkaroundsFromDriverInfo(DriverInfoVK &driver_info)
static std::vector< vk::DeviceQueueCreateInfo > GetQueueCreateInfos(std::initializer_list< QueueIndexVK > queues)
Definition: context_vk.cc:68
LoadAction load_action
Definition: formats.h:659
std::shared_ptr< Texture > texture
Definition: formats.h:657
StoreAction store_action
Definition: formats.h:660
static QueuesVK FromEmbedderQueue(vk::Queue queue, uint32_t queue_family_index)
Definition: queue_vk.cc:58
static QueuesVK FromQueueIndices(const vk::Device &device, QueueIndexVK graphics, QueueIndexVK compute, QueueIndexVK transfer)
Definition: queue_vk.cc:67
std::shared_ptr< QueueVK > graphics_queue
Definition: queue_vk.h:64
A non-exhaustive set of driver specific workarounds.
#define VALIDATION_LOG
Definition: validation.h:91