Flutter Impeller
context_vk.cc
Go to the documentation of this file.
1 // Copyright 2013 The Flutter Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
6 
7 #include "fml/concurrent_message_loop.h"
11 
12 #ifdef FML_OS_ANDROID
13 #include <pthread.h>
14 #include <sys/resource.h>
15 #include <sys/time.h>
16 #endif // FML_OS_ANDROID
17 
18 #include <map>
19 #include <memory>
20 #include <optional>
21 #include <string>
22 #include <vector>
23 
24 #include "flutter/fml/cpu_affinity.h"
25 #include "flutter/fml/trace_event.h"
40 
41 VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE
42 
43 namespace impeller {
44 
45 // TODO(csg): Fix this after caps are reworked.
46 static bool gHasValidationLayers = false;
47 
49  return gHasValidationLayers;
50 }
51 
52 static std::optional<vk::PhysicalDevice> PickPhysicalDevice(
53  const CapabilitiesVK& caps,
54  const vk::Instance& instance) {
55  for (const auto& device : instance.enumeratePhysicalDevices().value) {
56  if (caps.GetEnabledDeviceFeatures(device).has_value()) {
57  return device;
58  }
59  }
60  return std::nullopt;
61 }
62 
63 static std::vector<vk::DeviceQueueCreateInfo> GetQueueCreateInfos(
64  std::initializer_list<QueueIndexVK> queues) {
65  std::map<size_t /* family */, size_t /* index */> family_index_map;
66  for (const auto& queue : queues) {
67  family_index_map[queue.family] = 0;
68  }
69  for (const auto& queue : queues) {
70  auto value = family_index_map[queue.family];
71  family_index_map[queue.family] = std::max(value, queue.index);
72  }
73 
74  static float kQueuePriority = 1.0f;
75  std::vector<vk::DeviceQueueCreateInfo> infos;
76  for (const auto& item : family_index_map) {
77  vk::DeviceQueueCreateInfo info;
78  info.setQueueFamilyIndex(item.first);
79  info.setQueueCount(item.second + 1);
80  info.setQueuePriorities(kQueuePriority);
81  infos.push_back(info);
82  }
83  return infos;
84 }
85 
86 static std::optional<QueueIndexVK> PickQueue(const vk::PhysicalDevice& device,
87  vk::QueueFlagBits flags) {
88  // This can be modified to ensure that dedicated queues are returned for each
89  // queue type depending on support.
90  const auto families = device.getQueueFamilyProperties();
91  for (size_t i = 0u; i < families.size(); i++) {
92  if (!(families[i].queueFlags & flags)) {
93  continue;
94  }
95  return QueueIndexVK{.family = i, .index = 0};
96  }
97  return std::nullopt;
98 }
99 
100 std::shared_ptr<ContextVK> ContextVK::Create(Settings settings) {
101  auto context = std::shared_ptr<ContextVK>(new ContextVK());
102  context->Setup(std::move(settings));
103  if (!context->IsValid()) {
104  return nullptr;
105  }
106  return context;
107 }
108 
109 // static
110 size_t ContextVK::ChooseThreadCountForWorkers(size_t hardware_concurrency) {
111  // Never create more than 4 worker threads. Attempt to use up to
112  // half of the available concurrency.
113  return std::clamp(hardware_concurrency / 2ull, /*lo=*/1ull, /*hi=*/4ull);
114 }
115 
namespace {
// Per-thread count of contexts created so far; used to derive a unique
// identifier for each ContextVK instance.
thread_local uint64_t tls_context_count = 0;

// Returns a thread-unique, monotonically increasing identifier. The pointer
// parameter is accepted for call-site symmetry but intentionally unused (it
// is left unnamed to avoid unused-parameter warnings and to make clear that
// the pointer value is never hashed).
uint64_t CalculateHash(void* /* ptr */) {
  // You could make a context once per nanosecond for 584 years on one thread
  // before this overflows.
  return ++tls_context_count;
}
}  // namespace
124 
// Assigns each context a thread-unique identifier; CalculateHash returns a
// thread-local counter and does not actually hash the pointer.
ContextVK::ContextVK() : hash_(CalculateHash(this)) {}
126 
128  if (device_holder_ && device_holder_->device) {
129  [[maybe_unused]] auto result = device_holder_->device->waitIdle();
130  }
132 }
133 
136 }
137 
138 void ContextVK::Setup(Settings settings) {
139  TRACE_EVENT0("impeller", "ContextVK::Setup");
140 
141  if (!settings.proc_address_callback) {
142  return;
143  }
144 
145  raster_message_loop_ = fml::ConcurrentMessageLoop::Create(
146  ChooseThreadCountForWorkers(std::thread::hardware_concurrency()));
147  raster_message_loop_->PostTaskToAllWorkers([]() {
148  // Currently we only use the worker task pool for small parts of a frame
149  // workload, if this changes this setting may need to be adjusted.
150  fml::RequestAffinity(fml::CpuAffinity::kNotPerformance);
151 #ifdef FML_OS_ANDROID
152  if (::setpriority(PRIO_PROCESS, gettid(), -5) != 0) {
153  FML_LOG(ERROR) << "Failed to set Workers task runner priority";
154  }
155 #endif // FML_OS_ANDROID
156  });
157 
158  auto& dispatcher = VULKAN_HPP_DEFAULT_DISPATCHER;
159  dispatcher.init(settings.proc_address_callback);
160 
161  // Enable Vulkan validation if either:
162  // 1. The user has explicitly enabled it.
163  // 2. We are in a combination of debug mode, and running on Android.
164  // (It's possible 2 is overly conservative and we can simplify this)
165  auto enable_validation = settings.enable_validation;
166 
167 #if defined(FML_OS_ANDROID) && !defined(NDEBUG)
168  enable_validation = true;
169 #endif
170 
171  auto caps =
172  std::shared_ptr<CapabilitiesVK>(new CapabilitiesVK(enable_validation));
173 
174  if (!caps->IsValid()) {
175  VALIDATION_LOG << "Could not determine device capabilities.";
176  return;
177  }
178 
179  gHasValidationLayers = caps->AreValidationsEnabled();
180 
181  auto enabled_layers = caps->GetEnabledLayers();
182  auto enabled_extensions = caps->GetEnabledInstanceExtensions();
183 
184  if (!enabled_layers.has_value() || !enabled_extensions.has_value()) {
185  VALIDATION_LOG << "Device has insufficient capabilities.";
186  return;
187  }
188 
189  vk::InstanceCreateFlags instance_flags = {};
190 
191  if (std::find(enabled_extensions.value().begin(),
192  enabled_extensions.value().end(),
193  "VK_KHR_portability_enumeration") !=
194  enabled_extensions.value().end()) {
195  instance_flags |= vk::InstanceCreateFlagBits::eEnumeratePortabilityKHR;
196  }
197 
198  std::vector<const char*> enabled_layers_c;
199  std::vector<const char*> enabled_extensions_c;
200 
201  for (const auto& layer : enabled_layers.value()) {
202  enabled_layers_c.push_back(layer.c_str());
203  }
204 
205  for (const auto& ext : enabled_extensions.value()) {
206  enabled_extensions_c.push_back(ext.c_str());
207  }
208 
209  vk::ApplicationInfo application_info;
210  application_info.setApplicationVersion(VK_API_VERSION_1_0);
211  application_info.setApiVersion(VK_API_VERSION_1_1);
212  application_info.setEngineVersion(VK_API_VERSION_1_0);
213  application_info.setPEngineName("Impeller");
214  application_info.setPApplicationName("Impeller");
215 
216  vk::StructureChain<vk::InstanceCreateInfo, vk::ValidationFeaturesEXT>
217  instance_chain;
218 
219  if (!caps->AreValidationsEnabled()) {
220  instance_chain.unlink<vk::ValidationFeaturesEXT>();
221  }
222 
223  std::vector<vk::ValidationFeatureEnableEXT> enabled_validations = {
224  vk::ValidationFeatureEnableEXT::eSynchronizationValidation,
225  };
226 
227  auto validation = instance_chain.get<vk::ValidationFeaturesEXT>();
228  validation.setEnabledValidationFeatures(enabled_validations);
229 
230  auto instance_info = instance_chain.get<vk::InstanceCreateInfo>();
231  instance_info.setPEnabledLayerNames(enabled_layers_c);
232  instance_info.setPEnabledExtensionNames(enabled_extensions_c);
233  instance_info.setPApplicationInfo(&application_info);
234  instance_info.setFlags(instance_flags);
235 
236  auto device_holder = std::make_shared<DeviceHolderImpl>();
237  {
238  auto instance = vk::createInstanceUnique(instance_info);
239  if (instance.result != vk::Result::eSuccess) {
240  VALIDATION_LOG << "Could not create Vulkan instance: "
241  << vk::to_string(instance.result);
242  return;
243  }
244  device_holder->instance = std::move(instance.value);
245  }
246  dispatcher.init(device_holder->instance.get());
247 
248  //----------------------------------------------------------------------------
249  /// Setup the debug report.
250  ///
251  /// Do this as early as possible since we could use the debug report from
252  /// initialization issues.
253  ///
254  auto debug_report =
255  std::make_unique<DebugReportVK>(*caps, device_holder->instance.get());
256 
257  if (!debug_report->IsValid()) {
258  VALIDATION_LOG << "Could not set up debug report.";
259  return;
260  }
261 
262  //----------------------------------------------------------------------------
263  /// Pick the physical device.
264  ///
265  {
266  auto physical_device =
267  PickPhysicalDevice(*caps, device_holder->instance.get());
268  if (!physical_device.has_value()) {
269  VALIDATION_LOG << "No valid Vulkan device found.";
270  return;
271  }
272  device_holder->physical_device = physical_device.value();
273  }
274 
275  //----------------------------------------------------------------------------
276  /// Pick device queues.
277  ///
278  auto graphics_queue =
279  PickQueue(device_holder->physical_device, vk::QueueFlagBits::eGraphics);
280  auto transfer_queue =
281  PickQueue(device_holder->physical_device, vk::QueueFlagBits::eTransfer);
282  auto compute_queue =
283  PickQueue(device_holder->physical_device, vk::QueueFlagBits::eCompute);
284 
285  if (!graphics_queue.has_value()) {
286  VALIDATION_LOG << "Could not pick graphics queue.";
287  return;
288  }
289  if (!transfer_queue.has_value()) {
290  FML_LOG(INFO) << "Dedicated transfer queue not avialable.";
291  transfer_queue = graphics_queue.value();
292  }
293  if (!compute_queue.has_value()) {
294  VALIDATION_LOG << "Could not pick compute queue.";
295  return;
296  }
297 
298  //----------------------------------------------------------------------------
299  /// Create the logical device.
300  ///
301  auto enabled_device_extensions =
302  caps->GetEnabledDeviceExtensions(device_holder->physical_device);
303  if (!enabled_device_extensions.has_value()) {
304  // This shouldn't happen since we already did device selection. But
305  // doesn't hurt to check again.
306  return;
307  }
308 
309  std::vector<const char*> enabled_device_extensions_c;
310  for (const auto& ext : enabled_device_extensions.value()) {
311  enabled_device_extensions_c.push_back(ext.c_str());
312  }
313 
314  const auto queue_create_infos = GetQueueCreateInfos(
315  {graphics_queue.value(), compute_queue.value(), transfer_queue.value()});
316 
317  const auto enabled_features =
318  caps->GetEnabledDeviceFeatures(device_holder->physical_device);
319  if (!enabled_features.has_value()) {
320  // This shouldn't happen since the device can't be picked if this was not
321  // true. But doesn't hurt to check.
322  return;
323  }
324 
325  vk::DeviceCreateInfo device_info;
326 
327  device_info.setPNext(&enabled_features.value().get());
328  device_info.setQueueCreateInfos(queue_create_infos);
329  device_info.setPEnabledExtensionNames(enabled_device_extensions_c);
330  // Device layers are deprecated and ignored.
331 
332  {
333  auto device_result =
334  device_holder->physical_device.createDeviceUnique(device_info);
335  if (device_result.result != vk::Result::eSuccess) {
336  VALIDATION_LOG << "Could not create logical device.";
337  return;
338  }
339  device_holder->device = std::move(device_result.value);
340  }
341 
342  if (!caps->SetPhysicalDevice(device_holder->physical_device)) {
343  VALIDATION_LOG << "Capabilities could not be updated.";
344  return;
345  }
346 
347  //----------------------------------------------------------------------------
348  /// Create the allocator.
349  ///
350  auto allocator = std::shared_ptr<AllocatorVK>(new AllocatorVK(
351  weak_from_this(), //
352  application_info.apiVersion, //
353  device_holder->physical_device, //
354  device_holder, //
355  device_holder->instance.get(), //
356  *caps //
357  ));
358 
359  if (!allocator->IsValid()) {
360  VALIDATION_LOG << "Could not create memory allocator.";
361  return;
362  }
363 
364  //----------------------------------------------------------------------------
365  /// Setup the pipeline library.
366  ///
367  auto pipeline_library = std::shared_ptr<PipelineLibraryVK>(
368  new PipelineLibraryVK(device_holder, //
369  caps, //
370  std::move(settings.cache_directory), //
371  raster_message_loop_->GetTaskRunner() //
372  ));
373 
374  if (!pipeline_library->IsValid()) {
375  VALIDATION_LOG << "Could not create pipeline library.";
376  return;
377  }
378 
379  auto sampler_library =
380  std::shared_ptr<SamplerLibraryVK>(new SamplerLibraryVK(device_holder));
381 
382  auto shader_library = std::shared_ptr<ShaderLibraryVK>(
383  new ShaderLibraryVK(device_holder, //
384  settings.shader_libraries_data) //
385  );
386 
387  if (!shader_library->IsValid()) {
388  VALIDATION_LOG << "Could not create shader library.";
389  return;
390  }
391 
392  //----------------------------------------------------------------------------
393  /// Create the fence waiter.
394  ///
395  auto fence_waiter =
396  std::shared_ptr<FenceWaiterVK>(new FenceWaiterVK(device_holder));
397 
398  //----------------------------------------------------------------------------
399  /// Create the resource manager and command pool recycler.
400  ///
401  auto resource_manager = ResourceManagerVK::Create();
402  if (!resource_manager) {
403  VALIDATION_LOG << "Could not create resource manager.";
404  return;
405  }
406 
407  auto command_pool_recycler =
408  std::make_shared<CommandPoolRecyclerVK>(weak_from_this());
409  if (!command_pool_recycler) {
410  VALIDATION_LOG << "Could not create command pool recycler.";
411  return;
412  }
413 
414  auto descriptor_pool_recycler =
415  std::make_shared<DescriptorPoolRecyclerVK>(weak_from_this());
416  if (!descriptor_pool_recycler) {
417  VALIDATION_LOG << "Could not create descriptor pool recycler.";
418  return;
419  }
420 
421  //----------------------------------------------------------------------------
422  /// Fetch the queues.
423  ///
424  QueuesVK queues(device_holder->device.get(), //
425  graphics_queue.value(), //
426  compute_queue.value(), //
427  transfer_queue.value() //
428  );
429  if (!queues.IsValid()) {
430  VALIDATION_LOG << "Could not fetch device queues.";
431  return;
432  }
433 
434  VkPhysicalDeviceProperties physical_device_properties;
435  dispatcher.vkGetPhysicalDeviceProperties(device_holder->physical_device,
436  &physical_device_properties);
437 
438  //----------------------------------------------------------------------------
439  /// All done!
440  ///
441  device_holder_ = std::move(device_holder);
442  driver_info_ =
443  std::make_unique<DriverInfoVK>(device_holder_->physical_device);
444  debug_report_ = std::move(debug_report);
445  allocator_ = std::move(allocator);
446  shader_library_ = std::move(shader_library);
447  sampler_library_ = std::move(sampler_library);
448  pipeline_library_ = std::move(pipeline_library);
449  yuv_conversion_library_ = std::shared_ptr<YUVConversionLibraryVK>(
450  new YUVConversionLibraryVK(device_holder_));
451  queues_ = std::move(queues);
452  device_capabilities_ = std::move(caps);
453  fence_waiter_ = std::move(fence_waiter);
454  resource_manager_ = std::move(resource_manager);
455  command_pool_recycler_ = std::move(command_pool_recycler);
456  descriptor_pool_recycler_ = std::move(descriptor_pool_recycler);
457  device_name_ = std::string(physical_device_properties.deviceName);
458  command_queue_vk_ = std::make_shared<CommandQueueVK>(weak_from_this());
459  is_valid_ = true;
460 
461  // Create the GPU Tracer later because it depends on state from
462  // the ContextVK.
463  gpu_tracer_ = std::make_shared<GPUTracerVK>(weak_from_this(),
464  settings.enable_gpu_tracing);
465  gpu_tracer_->InitializeQueryPool(*this);
466 
467  //----------------------------------------------------------------------------
468  /// Label all the relevant objects. This happens after setup so that the
469  /// debug messengers have had a chance to be set up.
470  ///
471  SetDebugName(GetDevice(), device_holder_->device.get(), "ImpellerDevice");
472 }
473 
475  CapabilitiesVK::Cast(*device_capabilities_).SetOffscreenFormat(pixel_format);
476 }
477 
// |Context|
std::string ContextVK::DescribeGpuModel() const {
  // |device_name_| is captured from VkPhysicalDeviceProperties::deviceName
  // during Setup().
  return device_name_;
}

// |Context|
bool ContextVK::IsValid() const {
  // Set to true only at the very end of a fully successful Setup(); a context
  // that failed any setup step stays invalid and must be discarded.
  return is_valid_;
}

// |Context|
std::shared_ptr<Allocator> ContextVK::GetResourceAllocator() const {
  return allocator_;
}

// |Context|
std::shared_ptr<ShaderLibrary> ContextVK::GetShaderLibrary() const {
  return shader_library_;
}

// |Context|
std::shared_ptr<SamplerLibrary> ContextVK::GetSamplerLibrary() const {
  return sampler_library_;
}

// |Context|
std::shared_ptr<PipelineLibrary> ContextVK::GetPipelineLibrary() const {
  return pipeline_library_;
}
502 
// |Context|
std::shared_ptr<CommandBuffer> ContextVK::CreateCommandBuffer() const {
  // NOTE(review): constructed with bare `new` — presumably because the
  // CommandBufferVK constructor is non-public; confirm before switching to
  // std::make_shared.
  return std::shared_ptr<CommandBufferVK>(
      new CommandBufferVK(shared_from_this(),                     //
                          CreateGraphicsCommandEncoderFactory())  //
  );
}

// Unwraps the unique instance handle; ownership stays with |device_holder_|.
vk::Instance ContextVK::GetInstance() const {
  return *device_holder_->instance;
}

const vk::Device& ContextVK::GetDevice() const {
  return device_holder_->device.get();
}
517 
518 const std::shared_ptr<fml::ConcurrentTaskRunner>
520  return raster_message_loop_->GetTaskRunner();
521 }
522 
524  // There are multiple objects, for example |CommandPoolVK|, that in their
525  // destructors make a strong reference to |ContextVK|. Resetting these shared
526  // pointers ensures that cleanup happens in a correct order.
527  //
528  // tl;dr: Without it, we get thread::join failures on shutdown.
529  fence_waiter_.reset();
530  resource_manager_.reset();
531 
532  raster_message_loop_->Terminate();
533 }
534 
// Creates a surface context that shares ownership of this context.
std::shared_ptr<SurfaceContextVK> ContextVK::CreateSurfaceContext() {
  return std::make_shared<SurfaceContextVK>(shared_from_this());
}

// The accessors below return state initialized during Setup().

// |Context|
const std::shared_ptr<const Capabilities>& ContextVK::GetCapabilities() const {
  return device_capabilities_;
}

const std::shared_ptr<QueueVK>& ContextVK::GetGraphicsQueue() const {
  return queues_.graphics_queue;
}

vk::PhysicalDevice ContextVK::GetPhysicalDevice() const {
  return device_holder_->physical_device;
}

std::shared_ptr<FenceWaiterVK> ContextVK::GetFenceWaiter() const {
  return fence_waiter_;
}

std::shared_ptr<ResourceManagerVK> ContextVK::GetResourceManager() const {
  return resource_manager_;
}

std::shared_ptr<CommandPoolRecyclerVK> ContextVK::GetCommandPoolRecycler()
    const {
  return command_pool_recycler_;
}

// Creates a fresh factory per call; it holds only a weak reference to this
// context.
std::unique_ptr<CommandEncoderFactoryVK>
ContextVK::CreateGraphicsCommandEncoderFactory() const {
  return std::make_unique<CommandEncoderFactoryVK>(weak_from_this());
}

std::shared_ptr<GPUTracerVK> ContextVK::GetGPUTracer() const {
  return gpu_tracer_;
}

std::shared_ptr<DescriptorPoolRecyclerVK> ContextVK::GetDescriptorPoolRecycler()
    const {
  return descriptor_pool_recycler_;
}

// |Context|
std::shared_ptr<CommandQueue> ContextVK::GetCommandQueue() const {
  return command_queue_vk_;
}
581 
582 // Creating a render pass is observed to take an additional 6ms on a Pixel 7
583 // device as the driver will lazily bootstrap and compile shaders to do so.
584 // The render pass does not need to be begun or executed.
587  RenderTarget render_target =
588  rt_allocator.CreateOffscreenMSAA(*this, {1, 1}, 1);
589 
590  RenderPassBuilderVK builder;
591  for (const auto& [bind_point, color] : render_target.GetColorAttachments()) {
592  builder.SetColorAttachment(
593  bind_point, //
594  color.texture->GetTextureDescriptor().format, //
595  color.texture->GetTextureDescriptor().sample_count, //
596  color.load_action, //
597  color.store_action //
598  );
599  }
600 
601  if (auto depth = render_target.GetDepthAttachment(); depth.has_value()) {
603  depth->texture->GetTextureDescriptor().format, //
604  depth->texture->GetTextureDescriptor().sample_count, //
605  depth->load_action, //
606  depth->store_action //
607  );
608  } else if (auto stencil = render_target.GetStencilAttachment();
609  stencil.has_value()) {
610  builder.SetStencilAttachment(
611  stencil->texture->GetTextureDescriptor().format, //
612  stencil->texture->GetTextureDescriptor().sample_count, //
613  stencil->load_action, //
614  stencil->store_action //
615  );
616  }
617 
618  auto pass = builder.Build(GetDevice());
619 }
620 
621 const std::shared_ptr<YUVConversionLibraryVK>&
623  return yuv_conversion_library_;
624 }
625 
// Driver metadata gathered from the physical device at the end of Setup().
const std::unique_ptr<DriverInfoVK>& ContextVK::GetDriverInfo() const {
  return driver_info_;
}
629 
630 } // namespace impeller
impeller::ContextVK::GetCapabilities
const std::shared_ptr< const Capabilities > & GetCapabilities() const override
Get the capabilities of Impeller context. All optionally supported feature of the platform,...
Definition: context_vk.cc:539
impeller::ContextVK::GetConcurrentWorkerTaskRunner
const std::shared_ptr< fml::ConcurrentTaskRunner > GetConcurrentWorkerTaskRunner() const
Definition: context_vk.cc:519
fence_waiter_vk.h
impeller::CapabilitiesVK::SetOffscreenFormat
void SetOffscreenFormat(PixelFormat pixel_format) const
Definition: capabilities_vk.cc:410
gpu_tracer_vk.h
impeller::ResourceManagerVK::Create
static std::shared_ptr< ResourceManagerVK > Create()
Creates a shared resource manager (a dedicated thread).
Definition: resource_manager_vk.cc:14
impeller::QueueIndexVK
Definition: queue_vk.h:15
allocator_vk.h
impeller::gHasValidationLayers
static bool gHasValidationLayers
Definition: context_vk.cc:46
impeller::CommandPoolRecyclerVK::DestroyThreadLocalPools
static void DestroyThreadLocalPools(const ContextVK *context)
Clean up resources held by all per-thread command pools associated with the given context.
Definition: command_pool_vk.cc:285
impeller::ContextVK::GetCommandQueue
std::shared_ptr< CommandQueue > GetCommandQueue() const override
Return the graphics queue for submitting command buffers.
Definition: context_vk.cc:578
impeller::Context::BackendType
BackendType
Definition: context.h:48
impeller::ContextVK::IsValid
bool IsValid() const override
Determines if a context is valid. If the caller ever receives an invalid context, they must discard i...
Definition: context_vk.cc:483
impeller::ContextVK::GetInstance
vk::Instance GetInstance() const
Definition: context_vk.cc:510
impeller::PickPhysicalDevice
static std::optional< vk::PhysicalDevice > PickPhysicalDevice(const CapabilitiesVK &caps, const vk::Instance &instance)
Definition: context_vk.cc:52
impeller::RenderPassBuilderVK::SetStencilAttachment
RenderPassBuilderVK & SetStencilAttachment(PixelFormat format, SampleCount sample_count, LoadAction load_action, StoreAction store_action)
Definition: render_pass_builder_vk.cc:74
command_encoder_vk.h
impeller::ContextVK::GetPhysicalDevice
vk::PhysicalDevice GetPhysicalDevice() const
Definition: context_vk.cc:547
impeller::ContextVK::GetResourceAllocator
std::shared_ptr< Allocator > GetResourceAllocator() const override
Returns the allocator used to create textures and buffers on the device.
Definition: context_vk.cc:487
impeller::ContextVK::GetBackendType
BackendType GetBackendType() const override
Get the graphics backend of an Impeller context.
Definition: context_vk.cc:134
impeller::ContextVK::CreateCommandBuffer
std::shared_ptr< CommandBuffer > CreateCommandBuffer() const override
Create a new command buffer. Command buffers can be used to encode graphics, blit,...
Definition: context_vk.cc:503
yuv_conversion_library_vk.h
impeller::RenderPassBuilderVK
Definition: render_pass_builder_vk.h:17
surface_context_vk.h
impeller::RenderTarget::GetColorAttachments
const std::map< size_t, ColorAttachment > & GetColorAttachments() const
Definition: render_target.cc:198
validation.h
impeller::CapabilitiesVK::GetEnabledDeviceFeatures
std::optional< PhysicalDeviceFeatures > GetEnabledDeviceFeatures(const vk::PhysicalDevice &physical_device) const
Definition: capabilities_vk.cc:338
impeller::PixelFormat
PixelFormat
The Pixel formats supported by Impeller. The naming convention denotes the usage of the component,...
Definition: formats.h:100
impeller::GetQueueCreateInfos
static std::vector< vk::DeviceQueueCreateInfo > GetQueueCreateInfos(std::initializer_list< QueueIndexVK > queues)
Definition: context_vk.cc:63
capabilities_vk.h
impeller::RenderPassBuilderVK::SetDepthStencilAttachment
RenderPassBuilderVK & SetDepthStencilAttachment(PixelFormat format, SampleCount sample_count, LoadAction load_action, StoreAction store_action)
Definition: render_pass_builder_vk.cc:56
command_pool_vk.h
impeller::ContextVK::Settings
Definition: context_vk.h:46
command_buffer_vk.h
impeller::RenderTarget::GetDepthAttachment
const std::optional< DepthAttachment > & GetDepthAttachment() const
Definition: render_target.cc:203
impeller::ContextVK::CreateSurfaceContext
std::shared_ptr< SurfaceContextVK > CreateSurfaceContext()
Definition: context_vk.cc:535
debug_report_vk.h
impeller::ContextVK::GetShaderLibrary
std::shared_ptr< ShaderLibrary > GetShaderLibrary() const override
Returns the library of shaders used to specify the programmable stages of a pipeline.
Definition: context_vk.cc:491
impeller::ContextVK::GetGraphicsQueue
const std::shared_ptr< QueueVK > & GetGraphicsQueue() const
Definition: context_vk.cc:543
impeller::ContextVK::GetDescriptorPoolRecycler
std::shared_ptr< DescriptorPoolRecyclerVK > GetDescriptorPoolRecycler() const
Definition: context_vk.cc:573
impeller::QueuesVK::graphics_queue
std::shared_ptr< QueueVK > graphics_queue
Definition: queue_vk.h:62
impeller::RenderPassBuilderVK::SetColorAttachment
RenderPassBuilderVK & SetColorAttachment(size_t index, PixelFormat format, SampleCount sample_count, LoadAction load_action, StoreAction store_action)
Definition: render_pass_builder_vk.cc:29
render_pass_builder_vk.h
capabilities.h
impeller::RenderTarget
Definition: render_target.h:38
impeller::ContextVK::Create
static std::shared_ptr< ContextVK > Create(Settings settings)
Definition: context_vk.cc:100
impeller::CapabilitiesVK
The Vulkan layers and extensions wrangler.
Definition: capabilities_vk.h:113
impeller::CommandBufferVK
Definition: command_buffer_vk.h:18
impeller::ContextVK::SetDebugName
bool SetDebugName(T handle, std::string_view label) const
Definition: context_vk.h:106
impeller::ContextVK::GetCommandPoolRecycler
std::shared_ptr< CommandPoolRecyclerVK > GetCommandPoolRecycler() const
Definition: context_vk.cc:559
impeller::RenderTargetAllocator::CreateOffscreenMSAA
virtual RenderTarget CreateOffscreenMSAA(const Context &context, ISize size, int mip_count, const std::string &label="Offscreen MSAA", RenderTarget::AttachmentConfigMSAA color_attachment_config=RenderTarget::kDefaultColorAttachmentConfigMSAA, std::optional< RenderTarget::AttachmentConfig > stencil_attachment_config=RenderTarget::kDefaultStencilAttachmentConfig, const std::shared_ptr< Texture > &existing_color_msaa_texture=nullptr, const std::shared_ptr< Texture > &existing_color_resolve_texture=nullptr, const std::shared_ptr< Texture > &existing_depth_stencil_texture=nullptr)
Definition: render_target.cc:313
impeller::RenderTargetAllocator
a wrapper around the impeller [Allocator] instance that can be used to provide caching of allocated r...
Definition: render_target.h:142
impeller::ContextVK::GetGPUTracer
std::shared_ptr< GPUTracerVK > GetGPUTracer() const
Definition: context_vk.cc:569
impeller::ContextVK
Definition: context_vk.h:42
VALIDATION_LOG
#define VALIDATION_LOG
Definition: validation.h:73
impeller::ContextVK::InitializeCommonlyUsedShadersIfNeeded
void InitializeCommonlyUsedShadersIfNeeded() const override
Definition: context_vk.cc:585
impeller::Context::BackendType::kVulkan
@ kVulkan
resource_manager_vk.h
impeller::ContextVK::~ContextVK
~ContextVK() override
Definition: context_vk.cc:127
impeller::ContextVK::GetSamplerLibrary
std::shared_ptr< SamplerLibrary > GetSamplerLibrary() const override
Returns the library of combined image samplers used in shaders.
Definition: context_vk.cc:495
impeller::ContextVK::GetDevice
const vk::Device & GetDevice() const
Definition: context_vk.cc:514
impeller::ContextVK::ChooseThreadCountForWorkers
static size_t ChooseThreadCountForWorkers(size_t hardware_concurrency)
Definition: context_vk.cc:110
impeller::BackendCast< CapabilitiesVK, Capabilities >::Cast
static CapabilitiesVK & Cast(Capabilities &base)
Definition: backend_cast.h:13
impeller::ContextVK::GetResourceManager
std::shared_ptr< ResourceManagerVK > GetResourceManager() const
Definition: context_vk.cc:555
command_queue_vk.h
impeller::ContextVK::GetDriverInfo
const std::unique_ptr< DriverInfoVK > & GetDriverInfo() const
Definition: context_vk.cc:626
impeller::PickQueue
static std::optional< QueueIndexVK > PickQueue(const vk::PhysicalDevice &device, vk::QueueFlagBits flags)
Definition: context_vk.cc:86
impeller::ContextVK::GetYUVConversionLibrary
const std::shared_ptr< YUVConversionLibraryVK > & GetYUVConversionLibrary() const
Definition: context_vk.cc:622
impeller::ContextVK::GetPipelineLibrary
std::shared_ptr< PipelineLibrary > GetPipelineLibrary() const override
Returns the library of pipelines used by render or compute commands.
Definition: context_vk.cc:499
impeller::ContextVK::DescribeGpuModel
std::string DescribeGpuModel() const override
Definition: context_vk.cc:479
impeller::ContextVK::GetFenceWaiter
std::shared_ptr< FenceWaiterVK > GetFenceWaiter() const
Definition: context_vk.cc:551
impeller::RenderPassBuilderVK::Build
vk::UniqueRenderPass Build(const vk::Device &device) const
Definition: render_pass_builder_vk.cc:92
render_target.h
impeller::QueueIndexVK::family
size_t family
Definition: queue_vk.h:16
impeller::ContextVK::SetOffscreenFormat
void SetOffscreenFormat(PixelFormat pixel_format)
Definition: context_vk.cc:474
impeller::RenderTarget::GetStencilAttachment
const std::optional< StencilAttachment > & GetStencilAttachment() const
Definition: render_target.cc:207
context_vk.h
impeller::HasValidationLayers
bool HasValidationLayers()
Definition: context_vk.cc:48
impeller::ContextVK::Shutdown
void Shutdown() override
Force all pending asynchronous work to finish. This is achieved by deleting all owned concurrent mess...
Definition: context_vk.cc:523
impeller
Definition: aiks_blur_unittests.cc:20