Flutter Impeller
reflector.cc
Go to the documentation of this file.
1 // Copyright 2013 The Flutter Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 // FLUTTER_NOLINT: https://github.com/flutter/flutter/issues/105732
6 
8 
9 #include <atomic>
10 #include <optional>
11 #include <set>
12 #include <sstream>
13 
14 #include "flutter/fml/logging.h"
15 #include "fml/backtrace.h"
16 #include "impeller/base/strings.h"
24 #include "impeller/geometry/half.h"
28 #include "spirv_common.hpp"
29 
30 namespace impeller {
31 namespace compiler {
32 
33 static std::string ExecutionModelToString(spv::ExecutionModel model) {
34  switch (model) {
35  case spv::ExecutionModel::ExecutionModelVertex:
36  return "vertex";
37  case spv::ExecutionModel::ExecutionModelFragment:
38  return "fragment";
39  case spv::ExecutionModel::ExecutionModelGLCompute:
40  return "compute";
41  default:
42  return "unsupported";
43  }
44 }
45 
// Translates a stage name (as produced by ExecutionModelToString) into the
// C++ enumerator spelling emitted into generated reflection code. Unknown
// names map to "ShaderStage::kUnknown".
static std::string StringToShaderStage(const std::string& str) {
  return str == "vertex"     ? "ShaderStage::kVertex"
         : str == "fragment" ? "ShaderStage::kFragment"
         : str == "compute"  ? "ShaderStage::kCompute"
                             : "ShaderStage::kUnknown";
}
61 
63  const std::shared_ptr<const spirv_cross::ParsedIR>& ir,
64  const std::shared_ptr<fml::Mapping>& shader_data,
65  const CompilerBackend& compiler)
66  : options_(std::move(options)),
67  ir_(ir),
68  shader_data_(shader_data),
69  compiler_(compiler) {
70  if (!ir_ || !compiler_) {
71  return;
72  }
73 
74  if (auto template_arguments = GenerateTemplateArguments();
75  template_arguments.has_value()) {
76  template_arguments_ =
77  std::make_unique<nlohmann::json>(std::move(template_arguments.value()));
78  } else {
79  return;
80  }
81 
82  reflection_header_ = GenerateReflectionHeader();
83  if (!reflection_header_) {
84  return;
85  }
86 
87  reflection_cc_ = GenerateReflectionCC();
88  if (!reflection_cc_) {
89  return;
90  }
91 
92  runtime_stage_shader_ = GenerateRuntimeStageData();
93 
94  shader_bundle_data_ = GenerateShaderBundleData();
95  if (!shader_bundle_data_) {
96  return;
97  }
98 
99  is_valid_ = true;
100 }
101 
// Out-of-line defaulted destructor.
Reflector::~Reflector() = default;
103 
// True only when the constructor generated every required reflection
// artifact successfully.
bool Reflector::IsValid() const {
  return is_valid_;
}
107 
108 std::shared_ptr<fml::Mapping> Reflector::GetReflectionJSON() const {
109  if (!is_valid_) {
110  return nullptr;
111  }
112 
113  auto json_string =
114  std::make_shared<std::string>(template_arguments_->dump(2u));
115 
116  return std::make_shared<fml::NonOwnedMapping>(
117  reinterpret_cast<const uint8_t*>(json_string->data()),
118  json_string->size(), [json_string](auto, auto) {});
119 }
120 
// Returns the rendered reflection header contents (may be null if generation
// failed during construction).
std::shared_ptr<fml::Mapping> Reflector::GetReflectionHeader() const {
  return reflection_header_;
}
124 
// Returns the rendered reflection CC contents (may be null if generation
// failed during construction).
std::shared_ptr<fml::Mapping> Reflector::GetReflectionCC() const {
  return reflection_cc_;
}
128 
// Returns the runtime stage payload. May be null: GenerateRuntimeStageData()
// returns nullptr when the target platform has no runtime stage backend.
std::shared_ptr<RuntimeStageData::Shader> Reflector::GetRuntimeStageShaderData()
    const {
  return runtime_stage_shader_;
}
133 
// Returns the Flutter GPU shader bundle payload (may be null if generation
// failed during construction).
std::shared_ptr<ShaderBundleData> Reflector::GetShaderBundleData() const {
  return shader_bundle_data_;
}
137 
// Builds the JSON dictionary fed to the inja templates by InflateTemplate().
// Returns std::nullopt when the shader is malformed (wrong entrypoint count)
// or any resource fails to reflect.
std::optional<nlohmann::json> Reflector::GenerateTemplateArguments() const {
  nlohmann::json root;

  // Exactly one entrypoint is expected per compiled shader.
  const auto& entrypoints = compiler_->get_entry_points_and_stages();
  if (entrypoints.size() != 1) {
    VALIDATION_LOG << "Incorrect number of entrypoints in the shader. Found "
                   << entrypoints.size() << " but expected 1.";
    return std::nullopt;
  }

  auto execution_model = entrypoints.front().execution_model;
  {
    root["entrypoint"] = options_.entry_point_name;
    root["shader_name"] = options_.shader_name;
    root["shader_stage"] = ExecutionModelToString(execution_model);
    root["header_file_name"] = options_.header_file_name;
  }

  const auto shader_resources = compiler_->get_shader_resources();

  // Subpass Inputs.
  {
    auto& subpass_inputs = root["subpass_inputs"] = nlohmann::json::array_t{};
    if (auto subpass_inputs_json =
            ReflectResources(shader_resources.subpass_inputs);
        subpass_inputs_json.has_value()) {
      // Iterates by value deliberately: each element is tagged with its
      // descriptor type before being moved into the output array.
      for (auto subpass_input : subpass_inputs_json.value()) {
        subpass_input["descriptor_type"] = "DescriptorType::kInputAttachment";
        subpass_inputs.emplace_back(std::move(subpass_input));
      }
    } else {
      return std::nullopt;
    }
  }

  // Uniform and storage buffers. Both kinds land in the single "buffers"
  // array, distinguished by their "descriptor_type" tag.
  {
    auto& buffers = root["buffers"] = nlohmann::json::array_t{};
    if (auto uniform_buffers_json =
            ReflectResources(shader_resources.uniform_buffers);
        uniform_buffers_json.has_value()) {
      for (auto uniform_buffer : uniform_buffers_json.value()) {
        uniform_buffer["descriptor_type"] = "DescriptorType::kUniformBuffer";
        buffers.emplace_back(std::move(uniform_buffer));
      }
    } else {
      return std::nullopt;
    }
    if (auto storage_buffers_json =
            ReflectResources(shader_resources.storage_buffers);
        storage_buffers_json.has_value()) {
      for (auto uniform_buffer : storage_buffers_json.value()) {
        uniform_buffer["descriptor_type"] = "DescriptorType::kStorageBuffer";
        buffers.emplace_back(std::move(uniform_buffer));
      }
    } else {
      return std::nullopt;
    }
  }

  // Stage inputs. Byte offsets are only computed for the vertex stage, where
  // inputs correspond to interleaved vertex attributes.
  {
    auto& stage_inputs = root["stage_inputs"] = nlohmann::json::array_t{};
    if (auto stage_inputs_json = ReflectResources(
            shader_resources.stage_inputs,
            /*compute_offsets=*/execution_model == spv::ExecutionModelVertex);
        stage_inputs_json.has_value()) {
      stage_inputs = std::move(stage_inputs_json.value());
    } else {
      return std::nullopt;
    }
  }

  // Combined image-samplers, separate images, and separate samplers are all
  // emitted into one "sampled_images" array, each tagged with its
  // descriptor type.
  {
    auto combined_sampled_images =
        ReflectResources(shader_resources.sampled_images);
    auto images = ReflectResources(shader_resources.separate_images);
    auto samplers = ReflectResources(shader_resources.separate_samplers);
    if (!combined_sampled_images.has_value() || !images.has_value() ||
        !samplers.has_value()) {
      return std::nullopt;
    }
    auto& sampled_images = root["sampled_images"] = nlohmann::json::array_t{};
    for (auto value : combined_sampled_images.value()) {
      value["descriptor_type"] = "DescriptorType::kSampledImage";
      sampled_images.emplace_back(std::move(value));
    }
    for (auto value : images.value()) {
      value["descriptor_type"] = "DescriptorType::kImage";
      sampled_images.emplace_back(std::move(value));
    }
    for (auto value : samplers.value()) {
      value["descriptor_type"] = "DescriptorType::kSampledSampler";
      sampled_images.emplace_back(std::move(value));
    }
  }

  if (auto stage_outputs = ReflectResources(shader_resources.stage_outputs);
      stage_outputs.has_value()) {
    root["stage_outputs"] = std::move(stage_outputs.value());
  } else {
    return std::nullopt;
  }

  // Struct definitions: the per-vertex input struct (vertex stages only)
  // plus every interface struct discovered in the IR.
  {
    auto& struct_definitions = root["struct_definitions"] =
        nlohmann::json::array_t{};
    if (entrypoints.front().execution_model ==
            spv::ExecutionModel::ExecutionModelVertex &&
        !shader_resources.stage_inputs.empty()) {
      if (auto struc =
              ReflectPerVertexStructDefinition(shader_resources.stage_inputs);
          struc.has_value()) {
        struct_definitions.emplace_back(EmitStructDefinition(struc.value()));
      } else {
        // If there are stage inputs, it is an error to not generate a per
        // vertex data struct for a vertex like shader stage.
        return std::nullopt;
      }
    }

    std::set<spirv_cross::ID> known_structs;
    ir_->for_each_typed_id<spirv_cross::SPIRType>(
        [&](uint32_t, const spirv_cross::SPIRType& type) {
          if (type.basetype != spirv_cross::SPIRType::BaseType::Struct) {
            return;
          }
          // Skip structs that do not have layout offset decorations.
          // These structs are used internally within the shader and are not
          // part of the shader's interface.
          for (size_t i = 0; i < type.member_types.size(); i++) {
            if (!compiler_->has_member_decoration(type.self, i,
                                                  spv::DecorationOffset)) {
              return;
            }
          }
          if (known_structs.find(type.self) != known_structs.end()) {
            // Iterating over types this way leads to duplicates which may
            // cause duplicate struct definitions.
            return;
          }
          known_structs.insert(type.self);
          if (auto struc = ReflectStructDefinition(type.self);
              struc.has_value()) {
            struct_definitions.emplace_back(
                EmitStructDefinition(struc.value()));
          }
        });
  }

  root["bind_prototypes"] =
      EmitBindPrototypes(shader_resources, execution_model);

  return root;
}
292 
// Renders the reflection header template against the generated template
// arguments.
std::shared_ptr<fml::Mapping> Reflector::GenerateReflectionHeader() const {
  return InflateTemplate(kReflectionHeaderTemplate);
}
296 
// Renders the reflection CC template against the generated template
// arguments.
std::shared_ptr<fml::Mapping> Reflector::GenerateReflectionCC() const {
  return InflateTemplate(kReflectionCCTemplate);
}
300 
// Maps a target platform to its runtime stage backend; std::nullopt means
// the platform has no runtime stage support.
// NOTE(review): the `case` labels of this switch (orig. lines 304-320) were
// lost in extraction — as written the switch body is unreachable. Restore
// the per-platform cases from upstream before relying on this function.
static std::optional<RuntimeStageBackend> GetRuntimeStageBackend(
    TargetPlatform target_platform) {
  switch (target_platform) {
      return std::nullopt;
  }
  FML_UNREACHABLE();
}
324 
// Builds the RuntimeStageData::Shader payload — loose uniforms, the Vulkan
// UBO float layout, and vertex inputs — for runtime-effect shaders. Returns
// nullptr when the target platform has no runtime stage backend or the
// shader is malformed.
std::shared_ptr<RuntimeStageData::Shader> Reflector::GenerateRuntimeStageData()
    const {
  auto backend = GetRuntimeStageBackend(options_.target_platform);
  if (!backend.has_value()) {
    return nullptr;
  }

  const auto& entrypoints = compiler_->get_entry_points_and_stages();
  if (entrypoints.size() != 1u) {
    VALIDATION_LOG << "Single entrypoint not found.";
    return nullptr;
  }
  auto data = std::make_unique<RuntimeStageData::Shader>();
  data->entrypoint = options_.entry_point_name;
  data->stage = entrypoints.front().execution_model;
  data->shader = shader_data_;
  data->backend = backend.value();

  // Sort the IR so that the uniforms are in declaration order.
  std::vector<spirv_cross::ID> uniforms =
      SortUniforms(ir_.get(), compiler_.GetCompiler());
  for (auto& sorted_id : uniforms) {
    auto var = ir_->ids[sorted_id].get<spirv_cross::SPIRVariable>();
    const auto spir_type = compiler_->get_type(var.basetype);
    UniformDescription uniform_description;
    uniform_description.name = compiler_->get_name(var.self);
    uniform_description.location = compiler_->get_decoration(
        var.self, spv::Decoration::DecorationLocation);
    uniform_description.binding =
        compiler_->get_decoration(var.self, spv::Decoration::DecorationBinding);
    uniform_description.type = spir_type.basetype;
    uniform_description.rows = spir_type.vecsize;
    uniform_description.columns = spir_type.columns;
    uniform_description.bit_width = spir_type.width;
    uniform_description.array_elements = GetArrayElements(spir_type);
    // On the Vulkan backend all non-sampler uniforms must live inside the
    // UBO, so any loose uniform here has to be a sampled image.
    FML_CHECK(data->backend != RuntimeStageBackend::kVulkan ||
              spir_type.basetype ==
                  spirv_cross::SPIRType::BaseType::SampledImage)
        << "Vulkan runtime effect had unexpected uniforms outside of the "
           "uniform buffer object.";
    data->uniforms.emplace_back(std::move(uniform_description));
  }

  const auto ubos = compiler_->get_shader_resources().uniform_buffers;
  if (data->backend == RuntimeStageBackend::kVulkan && !ubos.empty()) {
    // NOTE(review): `&&` here lets a single wrongly-named UBO — and any
    // multi-UBO shader whose first UBO is correctly named — pass validation;
    // `||` looks intended. Confirm against upstream before changing.
    // NOTE(review): the line interpolating RuntimeStage::kVulkanUBOName into
    // the log message (orig. line 373) was lost in extraction.
    if (ubos.size() != 1 && ubos[0].name != RuntimeStage::kVulkanUBOName) {
      VALIDATION_LOG << "Expected a single UBO resource named "
                        "'"
                     << "' "
                        "for Vulkan runtime stage backend.";
      return nullptr;
    }

    const auto& ubo = ubos[0];

    size_t binding =
        compiler_->get_decoration(ubo.id, spv::Decoration::DecorationBinding);
    auto members = ReadStructMembers(ubo.type_id);
    std::vector<uint8_t> struct_layout;
    size_t float_count = 0;

    // Builds a float-slot mask for the UBO: 1 per live float, 0 per float of
    // padding.
    // NOTE(review): the `case` labels of the switch below (orig. lines 391,
    // 400, 423) were lost in extraction; the three statement groups
    // originally belonged to separate cases (padding, float data, and an
    // unsupported-type default, respectively). The brace structure below is
    // therefore unbalanced as shown — restore from upstream.
    for (size_t i = 0; i < members.size(); i += 1) {
      const auto& member = members[i];
      std::vector<int> bytes;
      switch (member.underlying_type) {
          size_t padding_count =
              (member.size + sizeof(float) - 1) / sizeof(float);
          while (padding_count > 0) {
            struct_layout.push_back(0);
            padding_count--;
          }
          break;
        }
          if (member.array_elements > 1) {
            // For each array element member, insert 1 layout property per byte
            // and 0 layout property per byte of padding
            for (auto i = 0; i < member.array_elements; i++) {
              for (auto j = 0u; j < member.size / sizeof(float); j++) {
                struct_layout.push_back(1);
              }
              for (auto j = 0u; j < member.element_padding / sizeof(float);
                   j++) {
                struct_layout.push_back(0);
              }
            }
          } else {
            size_t member_float_count = member.byte_length / sizeof(float);
            float_count += member_float_count;
            while (member_float_count > 0) {
              struct_layout.push_back(1);
              member_float_count--;
            }
          }
          break;
        }
          VALIDATION_LOG << "Non-floating-type struct member " << member.name
                         << " is not supported.";
          return nullptr;
      }
    }
    data->uniforms.emplace_back(UniformDescription{
        .name = ubo.name,
        .location = binding,
        .binding = binding,
        .type = spirv_cross::SPIRType::Struct,
        .struct_layout = std::move(struct_layout),
        .struct_float_count = float_count,
    });
  }

  // We only need to worry about storing vertex attributes.
  if (entrypoints.front().execution_model == spv::ExecutionModelVertex) {
    const auto inputs = compiler_->get_shader_resources().stage_inputs;
    auto input_offsets = ComputeOffsets(inputs);
    for (const auto& input : inputs) {
      std::optional<size_t> offset = GetOffset(input.id, input_offsets);

      const auto type = compiler_->get_type(input.type_id);

      InputDescription input_description;
      input_description.name = input.name;
      input_description.location = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationLocation);
      input_description.set = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationDescriptorSet);
      input_description.binding = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationBinding);
      input_description.type = type.basetype;
      input_description.bit_width = type.width;
      input_description.vec_size = type.vecsize;
      input_description.columns = type.columns;
      input_description.offset = offset.value_or(0u);
      data->inputs.emplace_back(std::move(input_description));
    }
  }

  return data;
}
467 
// Builds the ShaderBundleData payload (uniform structs, sampled images, and
// vertex inputs) consumed by Flutter GPU shader bundles. Returns nullptr for
// malformed shaders.
std::shared_ptr<ShaderBundleData> Reflector::GenerateShaderBundleData() const {
  const auto& entrypoints = compiler_->get_entry_points_and_stages();
  if (entrypoints.size() != 1u) {
    VALIDATION_LOG << "Single entrypoint not found.";
    return nullptr;
  }
  auto data = std::make_shared<ShaderBundleData>(
      options_.entry_point_name,            //
      entrypoints.front().execution_model,  //
      options_.target_platform              //
  );
  data->SetShaderData(shader_data_);

  const auto uniforms = compiler_->get_shader_resources().uniform_buffers;
  for (const auto& uniform : uniforms) {
    ShaderBundleData::ShaderUniformStruct uniform_struct;
    uniform_struct.name = uniform.name;
    // NOTE(review): the argument line of this GetExtendedMSLResourceBinding
    // call (orig. line 486) was lost in extraction.
    uniform_struct.ext_res_0 = compiler_.GetExtendedMSLResourceBinding(
    uniform_struct.set = compiler_->get_decoration(
        uniform.id, spv::Decoration::DecorationDescriptorSet);
    uniform_struct.binding = compiler_->get_decoration(
        uniform.id, spv::Decoration::DecorationBinding);

    const auto type = compiler_->get_type(uniform.type_id);
    if (type.basetype != spirv_cross::SPIRType::BaseType::Struct) {
      std::cerr << "Error: Uniform \"" << uniform.name
                << "\" is not a struct. All Flutter GPU shader uniforms must "
                   "be structs."
                << std::endl;
      return nullptr;
    }

    size_t size_in_bytes = 0;
    for (const auto& struct_member : ReadStructMembers(uniform.type_id)) {
      size_in_bytes += struct_member.byte_length;
      // Synthesized padding members count toward the struct size but are not
      // emitted as fields.
      if (StringStartsWith(struct_member.name, "_PADDING_")) {
        continue;
      }
      ShaderBundleData::ShaderUniformStructField uniform_struct_field;
      uniform_struct_field.name = struct_member.name;
      uniform_struct_field.type = struct_member.base_type;
      uniform_struct_field.offset_in_bytes = struct_member.offset;
      uniform_struct_field.element_size_in_bytes = struct_member.size;
      uniform_struct_field.total_size_in_bytes = struct_member.byte_length;
      uniform_struct_field.array_elements = struct_member.array_elements;
      uniform_struct.fields.push_back(uniform_struct_field);
    }
    uniform_struct.size_in_bytes = size_in_bytes;

    data->AddUniformStruct(uniform_struct);
  }

  const auto sampled_images = compiler_->get_shader_resources().sampled_images;
  for (const auto& image : sampled_images) {
    ShaderBundleData::ShaderUniformTexture uniform_texture;
    uniform_texture.name = image.name;
    // NOTE(review): the argument line of this GetExtendedMSLResourceBinding
    // call (orig. line 526) was lost in extraction.
    uniform_texture.ext_res_0 = compiler_.GetExtendedMSLResourceBinding(
    uniform_texture.set = compiler_->get_decoration(
        image.id, spv::Decoration::DecorationDescriptorSet);
    uniform_texture.binding =
        compiler_->get_decoration(image.id, spv::Decoration::DecorationBinding);
    data->AddUniformTexture(uniform_texture);
  }

  // We only need to worry about storing vertex attributes.
  if (entrypoints.front().execution_model == spv::ExecutionModelVertex) {
    const auto inputs = compiler_->get_shader_resources().stage_inputs;
    auto input_offsets = ComputeOffsets(inputs);
    for (const auto& input : inputs) {
      std::optional<size_t> offset = GetOffset(input.id, input_offsets);

      const auto type = compiler_->get_type(input.type_id);

      InputDescription input_description;
      input_description.name = input.name;
      input_description.location = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationLocation);
      input_description.set = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationDescriptorSet);
      input_description.binding = compiler_->get_decoration(
          input.id, spv::Decoration::DecorationBinding);
      input_description.type = type.basetype;
      input_description.bit_width = type.width;
      input_description.vec_size = type.vecsize;
      input_description.columns = type.columns;
      input_description.offset = offset.value_or(0u);
      data->AddInputDescription(std::move(input_description));
    }
  }

  return data;
}
562 
// Returns the element count of |type| when it is an array, or std::nullopt
// for non-array types. Aborts (FML_CHECK) on multi-dimensional arrays or
// sizes that are not literals (e.g. specialization constants).
std::optional<uint32_t> Reflector::GetArrayElements(
    const spirv_cross::SPIRType& type) const {
  if (type.array.empty()) {
    return std::nullopt;
  }
  FML_CHECK(type.array.size() == 1)
      << "Multi-dimensional arrays are not supported.";
  // array_size_literal records whether the dimension was written as a
  // literal; non-literal sizes cannot be reflected.
  FML_CHECK(type.array_size_literal.front())
      << "Must use a literal for array sizes.";
  return type.array.front();
}
574 
// Human-readable backend name, surfaced through the "get_generator_name"
// template callback.
// NOTE(review): the `case` labels of this switch (orig. lines 577-583) were
// lost in extraction; each bare return below originally belonged to a
// per-backend case. Restore from upstream.
static std::string ToString(CompilerBackend::Type type) {
  switch (type) {
      return "Metal Shading Language";
      return "OpenGL Shading Language";
      return "OpenGL Shading Language (Relaxed Vulkan Semantics)";
      return "SkSL Shading Language";
  }
  FML_UNREACHABLE();
}
588 
589 std::shared_ptr<fml::Mapping> Reflector::InflateTemplate(
590  std::string_view tmpl) const {
591  inja::Environment env;
592  env.set_trim_blocks(true);
593  env.set_lstrip_blocks(true);
594 
595  env.add_callback("camel_case", 1u, [](inja::Arguments& args) {
596  return ToCamelCase(args.at(0u)->get<std::string>());
597  });
598 
599  env.add_callback("to_shader_stage", 1u, [](inja::Arguments& args) {
600  return StringToShaderStage(args.at(0u)->get<std::string>());
601  });
602 
603  env.add_callback("get_generator_name", 0u,
604  [type = compiler_.GetType()](inja::Arguments& args) {
605  return ToString(type);
606  });
607 
608  auto inflated_template =
609  std::make_shared<std::string>(env.render(tmpl, *template_arguments_));
610 
611  return std::make_shared<fml::NonOwnedMapping>(
612  reinterpret_cast<const uint8_t*>(inflated_template->data()),
613  inflated_template->size(), [inflated_template](auto, auto) {});
614 }
615 
616 std::vector<size_t> Reflector::ComputeOffsets(
617  const spirv_cross::SmallVector<spirv_cross::Resource>& resources) const {
618  std::vector<size_t> offsets(resources.size(), 0);
619  if (resources.size() == 0) {
620  return offsets;
621  }
622  for (const auto& resource : resources) {
623  const auto type = compiler_->get_type(resource.type_id);
624  auto location = compiler_->get_decoration(
625  resource.id, spv::Decoration::DecorationLocation);
626  // Malformed shader, will be caught later on.
627  if (location >= resources.size() || location < 0) {
628  location = 0;
629  }
630  offsets[location] = (type.width * type.vecsize) / 8;
631  }
632  for (size_t i = 1; i < resources.size(); i++) {
633  offsets[i] += offsets[i - 1];
634  }
635  for (size_t i = resources.size() - 1; i > 0; i--) {
636  offsets[i] = offsets[i - 1];
637  }
638  offsets[0] = 0;
639 
640  return offsets;
641 }
642 
643 std::optional<size_t> Reflector::GetOffset(
644  spirv_cross::ID id,
645  const std::vector<size_t>& offsets) const {
646  uint32_t location =
647  compiler_->get_decoration(id, spv::Decoration::DecorationLocation);
648  if (location >= offsets.size()) {
649  return std::nullopt;
650  }
651  return offsets[location];
652 }
653 
// Reflects a single shader resource into a JSON object carrying its name,
// binding decorations, and full type description. |offset| is the byte
// offset for vertex attributes (defaults to 0 when absent).
std::optional<nlohmann::json::object_t> Reflector::ReflectResource(
    const spirv_cross::Resource& resource,
    std::optional<size_t> offset) const {
  nlohmann::json::object_t result;

  result["name"] = resource.name;
  result["descriptor_set"] = compiler_->get_decoration(
      resource.id, spv::Decoration::DecorationDescriptorSet);
  result["binding"] = compiler_->get_decoration(
      resource.id, spv::Decoration::DecorationBinding);
  result["set"] = compiler_->get_decoration(
      resource.id, spv::Decoration::DecorationDescriptorSet);
  result["location"] = compiler_->get_decoration(
      resource.id, spv::Decoration::DecorationLocation);
  result["index"] =
      compiler_->get_decoration(resource.id, spv::Decoration::DecorationIndex);
  // NOTE(review): the argument lines of the two GetExtendedMSLResourceBinding
  // calls below (orig. lines 671 and 673) were lost in extraction; restore
  // them from upstream before compiling.
  result["ext_res_0"] = compiler_.GetExtendedMSLResourceBinding(
  result["ext_res_1"] = compiler_.GetExtendedMSLResourceBinding(
  result["relaxed_precision"] =
      compiler_->get_decoration(
          resource.id, spv::Decoration::DecorationRelaxedPrecision) == 1;
  result["offset"] = offset.value_or(0u);
  auto type = ReflectType(resource.type_id);
  if (!type.has_value()) {
    return std::nullopt;
  }
  result["type"] = std::move(type.value());
  return result;
}
685 
686 std::optional<nlohmann::json::object_t> Reflector::ReflectType(
687  const spirv_cross::TypeID& type_id) const {
688  nlohmann::json::object_t result;
689 
690  const auto type = compiler_->get_type(type_id);
691 
692  result["type_name"] = StructMember::BaseTypeToString(type.basetype);
693  result["bit_width"] = type.width;
694  result["vec_size"] = type.vecsize;
695  result["columns"] = type.columns;
696  auto& members = result["members"] = nlohmann::json::array_t{};
697  if (type.basetype == spirv_cross::SPIRType::BaseType::Struct) {
698  for (const auto& struct_member : ReadStructMembers(type_id)) {
699  auto member = nlohmann::json::object_t{};
700  member["name"] = struct_member.name;
701  member["type"] = struct_member.type;
702  member["base_type"] =
703  StructMember::BaseTypeToString(struct_member.base_type);
704  member["offset"] = struct_member.offset;
705  member["size"] = struct_member.size;
706  member["byte_length"] = struct_member.byte_length;
707  if (struct_member.array_elements.has_value()) {
708  member["array_elements"] = struct_member.array_elements.value();
709  } else {
710  member["array_elements"] = "std::nullopt";
711  }
712  members.emplace_back(std::move(member));
713  }
714  }
715 
716  return result;
717 }
718 
719 std::optional<nlohmann::json::array_t> Reflector::ReflectResources(
720  const spirv_cross::SmallVector<spirv_cross::Resource>& resources,
721  bool compute_offsets) const {
722  nlohmann::json::array_t result;
723  result.reserve(resources.size());
724  std::vector<size_t> offsets;
725  if (compute_offsets) {
726  offsets = ComputeOffsets(resources);
727  }
728  for (const auto& resource : resources) {
729  std::optional<size_t> maybe_offset = std::nullopt;
730  if (compute_offsets) {
731  maybe_offset = GetOffset(resource.id, offsets);
732  }
733  if (auto reflected = ReflectResource(resource, maybe_offset);
734  reflected.has_value()) {
735  result.emplace_back(std::move(reflected.value()));
736  } else {
737  return std::nullopt;
738  }
739  }
740  return result;
741 }
742 
// Returns the C++ type name used for a synthesized padding member of |size|
// bytes, e.g. "Padding<4>". Uses std::to_string directly rather than paying
// for a std::stringstream for a three-piece concatenation.
static std::string TypeNameWithPaddingOfSize(size_t size) {
  return "Padding<" + std::to_string(size) + ">";
}
748 
// A scalar whose native C++ spelling and byte size are known to the
// reflector.
struct KnownType {
  // Native type name emitted into generated code (e.g. "Scalar").
  std::string name;
  // sizeof the native type.
  size_t byte_size = 0;
};
753 
754 static std::optional<KnownType> ReadKnownScalarType(
755  spirv_cross::SPIRType::BaseType type) {
756  switch (type) {
757  case spirv_cross::SPIRType::BaseType::Boolean:
758  return KnownType{
759  .name = "bool",
760  .byte_size = sizeof(bool),
761  };
762  case spirv_cross::SPIRType::BaseType::Float:
763  return KnownType{
764  .name = "Scalar",
765  .byte_size = sizeof(Scalar),
766  };
767  case spirv_cross::SPIRType::BaseType::Half:
768  return KnownType{
769  .name = "Half",
770  .byte_size = sizeof(Half),
771  };
772  case spirv_cross::SPIRType::BaseType::UInt:
773  return KnownType{
774  .name = "uint32_t",
775  .byte_size = sizeof(uint32_t),
776  };
777  case spirv_cross::SPIRType::BaseType::Int:
778  return KnownType{
779  .name = "int32_t",
780  .byte_size = sizeof(int32_t),
781  };
782  default:
783  break;
784  }
785  return std::nullopt;
786 }
787 
788 //------------------------------------------------------------------------------
789 /// @brief Get the reflected struct size. In the vast majority of the
790 /// cases, this is the same as the declared struct size as given by
791 /// the compiler. But, additional padding may need to be introduced
792 /// after the end of the struct to keep in line with the alignment
793 /// requirement of the individual struct members. This method
794 /// figures out the actual size of the reflected struct that can be
795 /// referenced in native code.
796 ///
797 /// @param[in] members The members
798 ///
799 /// @return The reflected structure size.
800 ///
801 static size_t GetReflectedStructSize(const std::vector<StructMember>& members) {
802  auto struct_size = 0u;
803  for (const auto& member : members) {
804  struct_size += member.byte_length;
805  }
806  return struct_size;
807 }
808 
809 std::vector<StructMember> Reflector::ReadStructMembers(
810  const spirv_cross::TypeID& type_id) const {
811  const auto& struct_type = compiler_->get_type(type_id);
812  FML_CHECK(struct_type.basetype == spirv_cross::SPIRType::BaseType::Struct);
813 
814  std::vector<StructMember> result;
815 
816  size_t current_byte_offset = 0;
817  size_t max_member_alignment = 0;
818 
819  for (size_t i = 0; i < struct_type.member_types.size(); i++) {
820  const auto& member = compiler_->get_type(struct_type.member_types[i]);
821  const auto struct_member_offset =
822  compiler_->type_struct_member_offset(struct_type, i);
823  auto array_elements = GetArrayElements(member);
824 
825  if (struct_member_offset > current_byte_offset) {
826  const auto alignment_pad = struct_member_offset - current_byte_offset;
827  result.emplace_back(StructMember{
828  TypeNameWithPaddingOfSize(alignment_pad), // type
829  spirv_cross::SPIRType::BaseType::Void, // basetype
830  SPrintF("_PADDING_%s_",
831  GetMemberNameAtIndex(struct_type, i).c_str()), // name
832  current_byte_offset, // offset
833  alignment_pad, // size
834  alignment_pad, // byte_length
835  std::nullopt, // array_elements
836  0, // element_padding
837  });
838  current_byte_offset += alignment_pad;
839  }
840 
841  max_member_alignment =
842  std::max<size_t>(max_member_alignment,
843  (member.width / 8) * member.columns * member.vecsize);
844 
845  FML_CHECK(current_byte_offset == struct_member_offset);
846 
847  // A user defined struct.
848  if (member.basetype == spirv_cross::SPIRType::BaseType::Struct) {
849  const size_t size =
850  GetReflectedStructSize(ReadStructMembers(member.self));
851  uint32_t stride = GetArrayStride<0>(struct_type, member, i);
852  if (stride == 0) {
853  stride = size;
854  }
855  uint32_t element_padding = stride - size;
856  result.emplace_back(StructMember{
857  compiler_->get_name(member.self), // type
858  member.basetype, // basetype
859  GetMemberNameAtIndex(struct_type, i), // name
860  struct_member_offset, // offset
861  size, // size
862  stride * array_elements.value_or(1), // byte_length
863  array_elements, // array_elements
864  element_padding, // element_padding
865  });
866  current_byte_offset += stride * array_elements.value_or(1);
867  continue;
868  }
869 
870  // Tightly packed 4x4 Matrix is special cased as we know how to work with
871  // those.
872  if (member.basetype == spirv_cross::SPIRType::BaseType::Float && //
873  member.width == sizeof(Scalar) * 8 && //
874  member.columns == 4 && //
875  member.vecsize == 4 //
876  ) {
877  uint32_t stride = GetArrayStride<sizeof(Matrix)>(struct_type, member, i);
878  uint32_t element_padding = stride - sizeof(Matrix);
879  result.emplace_back(StructMember{
880  "Matrix", // type
881  member.basetype, // basetype
882  GetMemberNameAtIndex(struct_type, i), // name
883  struct_member_offset, // offset
884  sizeof(Matrix), // size
885  stride * array_elements.value_or(1), // byte_length
886  array_elements, // array_elements
887  element_padding, // element_padding
888  });
889  current_byte_offset += stride * array_elements.value_or(1);
890  continue;
891  }
892 
893  // Tightly packed UintPoint32 (uvec2)
894  if (member.basetype == spirv_cross::SPIRType::BaseType::UInt && //
895  member.width == sizeof(uint32_t) * 8 && //
896  member.columns == 1 && //
897  member.vecsize == 2 //
898  ) {
899  uint32_t stride =
900  GetArrayStride<sizeof(UintPoint32)>(struct_type, member, i);
901  uint32_t element_padding = stride - sizeof(UintPoint32);
902  result.emplace_back(StructMember{
903  "UintPoint32", // type
904  member.basetype, // basetype
905  GetMemberNameAtIndex(struct_type, i), // name
906  struct_member_offset, // offset
907  sizeof(UintPoint32), // size
908  stride * array_elements.value_or(1), // byte_length
909  array_elements, // array_elements
910  element_padding, // element_padding
911  });
912  current_byte_offset += stride * array_elements.value_or(1);
913  continue;
914  }
915 
916  // Tightly packed UintPoint32 (ivec2)
917  if (member.basetype == spirv_cross::SPIRType::BaseType::Int && //
918  member.width == sizeof(int32_t) * 8 && //
919  member.columns == 1 && //
920  member.vecsize == 2 //
921  ) {
922  uint32_t stride =
923  GetArrayStride<sizeof(IPoint32)>(struct_type, member, i);
924  uint32_t element_padding = stride - sizeof(IPoint32);
925  result.emplace_back(StructMember{
926  "IPoint32", // type
927  member.basetype, // basetype
928  GetMemberNameAtIndex(struct_type, i), // name
929  struct_member_offset, // offset
930  sizeof(IPoint32), // size
931  stride * array_elements.value_or(1), // byte_length
932  array_elements, // array_elements
933  element_padding, // element_padding
934  });
935  current_byte_offset += stride * array_elements.value_or(1);
936  continue;
937  }
938 
939  // Tightly packed Point (vec2).
940  if (member.basetype == spirv_cross::SPIRType::BaseType::Float && //
941  member.width == sizeof(float) * 8 && //
942  member.columns == 1 && //
943  member.vecsize == 2 //
944  ) {
945  uint32_t stride = GetArrayStride<sizeof(Point)>(struct_type, member, i);
946  uint32_t element_padding = stride - sizeof(Point);
947  result.emplace_back(StructMember{
948  "Point", // type
949  member.basetype, // basetype
950  GetMemberNameAtIndex(struct_type, i), // name
951  struct_member_offset, // offset
952  sizeof(Point), // size
953  stride * array_elements.value_or(1), // byte_length
954  array_elements, // array_elements
955  element_padding, // element_padding
956  });
957  current_byte_offset += stride * array_elements.value_or(1);
958  continue;
959  }
960 
961  // Tightly packed Vector3.
962  if (member.basetype == spirv_cross::SPIRType::BaseType::Float && //
963  member.width == sizeof(float) * 8 && //
964  member.columns == 1 && //
965  member.vecsize == 3 //
966  ) {
967  uint32_t stride = GetArrayStride<sizeof(Vector3)>(struct_type, member, i);
968  uint32_t element_padding = stride - sizeof(Vector3);
969  result.emplace_back(StructMember{
970  "Vector3", // type
971  member.basetype, // basetype
972  GetMemberNameAtIndex(struct_type, i), // name
973  struct_member_offset, // offset
974  sizeof(Vector3), // size
975  stride * array_elements.value_or(1), // byte_length
976  array_elements, // array_elements
977  element_padding, // element_padding
978  });
979  current_byte_offset += stride * array_elements.value_or(1);
980  continue;
981  }
982 
983  // Tightly packed Vector4.
984  if (member.basetype == spirv_cross::SPIRType::BaseType::Float && //
985  member.width == sizeof(float) * 8 && //
986  member.columns == 1 && //
987  member.vecsize == 4 //
988  ) {
989  uint32_t stride = GetArrayStride<sizeof(Vector4)>(struct_type, member, i);
990  uint32_t element_padding = stride - sizeof(Vector4);
991  result.emplace_back(StructMember{
992  "Vector4", // type
993  member.basetype, // basetype
994  GetMemberNameAtIndex(struct_type, i), // name
995  struct_member_offset, // offset
996  sizeof(Vector4), // size
997  stride * array_elements.value_or(1), // byte_length
998  array_elements, // array_elements
999  element_padding, // element_padding
1000  });
1001  current_byte_offset += stride * array_elements.value_or(1);
1002  continue;
1003  }
1004 
1005  // Tightly packed half Point (vec2).
1006  if (member.basetype == spirv_cross::SPIRType::BaseType::Half && //
1007  member.width == sizeof(Half) * 8 && //
1008  member.columns == 1 && //
1009  member.vecsize == 2 //
1010  ) {
1011  uint32_t stride =
1012  GetArrayStride<sizeof(HalfVector2)>(struct_type, member, i);
1013  uint32_t element_padding = stride - sizeof(HalfVector2);
1014  result.emplace_back(StructMember{
1015  "HalfVector2", // type
1016  member.basetype, // basetype
1017  GetMemberNameAtIndex(struct_type, i), // name
1018  struct_member_offset, // offset
1019  sizeof(HalfVector2), // size
1020  stride * array_elements.value_or(1), // byte_length
1021  array_elements, // array_elements
1022  element_padding, // element_padding
1023  });
1024  current_byte_offset += stride * array_elements.value_or(1);
1025  continue;
1026  }
1027 
1028  // Tightly packed Half Float Vector3.
1029  if (member.basetype == spirv_cross::SPIRType::BaseType::Half && //
1030  member.width == sizeof(Half) * 8 && //
1031  member.columns == 1 && //
1032  member.vecsize == 3 //
1033  ) {
1034  uint32_t stride =
1035  GetArrayStride<sizeof(HalfVector3)>(struct_type, member, i);
1036  uint32_t element_padding = stride - sizeof(HalfVector3);
1037  result.emplace_back(StructMember{
1038  "HalfVector3", // type
1039  member.basetype, // basetype
1040  GetMemberNameAtIndex(struct_type, i), // name
1041  struct_member_offset, // offset
1042  sizeof(HalfVector3), // size
1043  stride * array_elements.value_or(1), // byte_length
1044  array_elements, // array_elements
1045  element_padding, // element_padding
1046  });
1047  current_byte_offset += stride * array_elements.value_or(1);
1048  continue;
1049  }
1050 
1051  // Tightly packed Half Float Vector4.
1052  if (member.basetype == spirv_cross::SPIRType::BaseType::Half && //
1053  member.width == sizeof(Half) * 8 && //
1054  member.columns == 1 && //
1055  member.vecsize == 4 //
1056  ) {
1057  uint32_t stride =
1058  GetArrayStride<sizeof(HalfVector4)>(struct_type, member, i);
1059  uint32_t element_padding = stride - sizeof(HalfVector4);
1060  result.emplace_back(StructMember{
1061  "HalfVector4", // type
1062  member.basetype, // basetype
1063  GetMemberNameAtIndex(struct_type, i), // name
1064  struct_member_offset, // offset
1065  sizeof(HalfVector4), // size
1066  stride * array_elements.value_or(1), // byte_length
1067  array_elements, // array_elements
1068  element_padding, // element_padding
1069  });
1070  current_byte_offset += stride * array_elements.value_or(1);
1071  continue;
1072  }
1073 
1074  // Other isolated scalars (like bool, int, float/Scalar, etc..).
1075  {
1076  auto maybe_known_type = ReadKnownScalarType(member.basetype);
1077  if (maybe_known_type.has_value() && //
1078  member.columns == 1 && //
1079  member.vecsize == 1 //
1080  ) {
1081  uint32_t stride = GetArrayStride<0>(struct_type, member, i);
1082  if (stride == 0) {
1083  stride = maybe_known_type.value().byte_size;
1084  }
1085  uint32_t element_padding = stride - maybe_known_type.value().byte_size;
1086  // Add the type directly.
1087  result.emplace_back(StructMember{
1088  maybe_known_type.value().name, // type
1089  member.basetype, // basetype
1090  GetMemberNameAtIndex(struct_type, i), // name
1091  struct_member_offset, // offset
1092  maybe_known_type.value().byte_size, // size
1093  stride * array_elements.value_or(1), // byte_length
1094  array_elements, // array_elements
1095  element_padding, // element_padding
1096  });
1097  current_byte_offset += stride * array_elements.value_or(1);
1098  continue;
1099  }
1100  }
1101 
1102  // Catch all for unknown types. Just add the necessary padding to the struct
1103  // and move on.
1104  {
1105  const size_t size = (member.width * member.columns * member.vecsize) / 8u;
1106  uint32_t stride = GetArrayStride<0>(struct_type, member, i);
1107  if (stride == 0) {
1108  stride = size;
1109  }
1110  auto element_padding = stride - size;
1111  result.emplace_back(StructMember{
1112  TypeNameWithPaddingOfSize(size), // type
1113  member.basetype, // basetype
1114  GetMemberNameAtIndex(struct_type, i), // name
1115  struct_member_offset, // offset
1116  size, // size
1117  stride * array_elements.value_or(1), // byte_length
1118  array_elements, // array_elements
1119  element_padding, // element_padding
1120  });
1121  current_byte_offset += stride * array_elements.value_or(1);
1122  continue;
1123  }
1124  }
1125 
1126  if (max_member_alignment > 0u) {
1127  const auto struct_length = current_byte_offset;
1128  {
1129  const auto excess = struct_length % max_member_alignment;
1130  if (excess != 0) {
1131  const auto padding = max_member_alignment - excess;
 1132  result.emplace_back(StructMember{
 1133  TypeNameWithPaddingOfSize(padding),  // type
 1134  spirv_cross::SPIRType::BaseType::Void, // basetype
1135  "_PADDING_", // name
1136  current_byte_offset, // offset
1137  padding, // size
1138  padding, // byte_length
1139  std::nullopt, // array_elements
1140  0, // element_padding
1141  });
1142  }
1143  }
1144  }
1145 
1146  return result;
1147 }
1148 
1149 std::optional<Reflector::StructDefinition> Reflector::ReflectStructDefinition(
1150  const spirv_cross::TypeID& type_id) const {
1151  const auto& type = compiler_->get_type(type_id);
1152  if (type.basetype != spirv_cross::SPIRType::BaseType::Struct) {
1153  return std::nullopt;
1154  }
1155 
1156  const auto struct_name = compiler_->get_name(type_id);
1157  if (struct_name.find("_RESERVED_IDENTIFIER_") != std::string::npos) {
1158  return std::nullopt;
1159  }
1160 
1161  auto struct_members = ReadStructMembers(type_id);
1162  auto reflected_struct_size = GetReflectedStructSize(struct_members);
1163 
1164  StructDefinition struc;
1165  struc.name = struct_name;
1166  struc.byte_length = reflected_struct_size;
1167  struc.members = std::move(struct_members);
1168  return struc;
1169 }
1170 
1171 nlohmann::json::object_t Reflector::EmitStructDefinition(
1172  std::optional<Reflector::StructDefinition> struc) const {
1173  nlohmann::json::object_t result;
1174  result["name"] = struc->name;
1175  result["byte_length"] = struc->byte_length;
1176  auto& members = result["members"] = nlohmann::json::array_t{};
1177  for (const auto& struct_member : struc->members) {
1178  auto& member = members.emplace_back(nlohmann::json::object_t{});
1179  member["name"] = struct_member.name;
1180  member["type"] = struct_member.type;
1181  member["base_type"] =
1182  StructMember::BaseTypeToString(struct_member.base_type);
1183  member["offset"] = struct_member.offset;
1184  member["byte_length"] = struct_member.byte_length;
1185  if (struct_member.array_elements.has_value()) {
1186  member["array_elements"] = struct_member.array_elements.value();
1187  } else {
1188  member["array_elements"] = "std::nullopt";
1189  }
1190  member["element_padding"] = struct_member.element_padding;
1191  }
1192  return result;
1193 }
1194 
// Reflection record for a single vertex stage input: the generated-code type
// used to represent it, its SPIR-V base type, the shader variable name, and
// its packed size in bytes. Populated by VertexTypeFromInputResource below.
struct VertexType {
  std::string type_name;  // Generated-code type name (e.g. "Point", "Vector4").
  spirv_cross::SPIRType::BaseType base_type;  // Underlying SPIR-V scalar type.
  std::string variable_name;  // Name of the stage input variable.
  size_t byte_length = 0u;    // Packed size of the input in bytes.
};
1201 
1203  const spirv_cross::Compiler& compiler,
1204  const spirv_cross::Resource* resource) {
1205  VertexType result;
1206  result.variable_name = resource->name;
1207  const auto& type = compiler.get_type(resource->type_id);
1208  result.base_type = type.basetype;
1209  const auto total_size = type.columns * type.vecsize * type.width / 8u;
1210  result.byte_length = total_size;
1211 
1212  if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
1213  type.columns == 1u && type.vecsize == 2u &&
1214  type.width == sizeof(float) * 8u) {
1215  result.type_name = "Point";
1216  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
1217  type.columns == 1u && type.vecsize == 4u &&
1218  type.width == sizeof(float) * 8u) {
1219  result.type_name = "Vector4";
1220  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
1221  type.columns == 1u && type.vecsize == 3u &&
1222  type.width == sizeof(float) * 8u) {
1223  result.type_name = "Vector3";
1224  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Float &&
1225  type.columns == 1u && type.vecsize == 1u &&
1226  type.width == sizeof(float) * 8u) {
1227  result.type_name = "Scalar";
1228  } else if (type.basetype == spirv_cross::SPIRType::BaseType::Int &&
1229  type.columns == 1u && type.vecsize == 1u &&
1230  type.width == sizeof(int32_t) * 8u) {
1231  result.type_name = "int32_t";
1232  } else {
1233  // Catch all unknown padding.
1234  result.type_name = TypeNameWithPaddingOfSize(total_size);
1235  }
1236 
1237  return result;
1238 }
1239 
1240 std::optional<Reflector::StructDefinition>
1241 Reflector::ReflectPerVertexStructDefinition(
1242  const spirv_cross::SmallVector<spirv_cross::Resource>& stage_inputs) const {
1243  // Avoid emitting a zero sized structure. The code gen templates assume a
1244  // non-zero size.
1245  if (stage_inputs.empty()) {
1246  return std::nullopt;
1247  }
1248 
1249  // Validate locations are contiguous and there are no duplicates.
1250  std::set<uint32_t> locations;
1251  for (const auto& input : stage_inputs) {
1252  auto location = compiler_->get_decoration(
1253  input.id, spv::Decoration::DecorationLocation);
1254  if (locations.count(location) != 0) {
1255  // Duplicate location. Bail.
1256  return std::nullopt;
1257  }
1258  locations.insert(location);
1259  }
1260 
1261  for (size_t i = 0; i < locations.size(); i++) {
1262  if (locations.count(i) != 1) {
1263  // Locations are not contiguous. This usually happens when a single stage
1264  // input takes multiple input slots. No reflection information can be
1265  // generated for such cases anyway. So bail! It is up to the shader author
1266  // to make sure one stage input maps to a single input slot.
1267  return std::nullopt;
1268  }
1269  }
1270 
1271  auto input_for_location =
1272  [&](uint32_t queried_location) -> const spirv_cross::Resource* {
1273  for (const auto& input : stage_inputs) {
1274  auto location = compiler_->get_decoration(
1275  input.id, spv::Decoration::DecorationLocation);
1276  if (location == queried_location) {
1277  return &input;
1278  }
1279  }
1280  // This really cannot happen with all the validation above.
1281  FML_UNREACHABLE();
1282  return nullptr;
1283  };
1284 
1285  StructDefinition struc;
1286  struc.name = "PerVertexData";
1287  struc.byte_length = 0u;
1288  for (size_t i = 0; i < locations.size(); i++) {
1289  auto resource = input_for_location(i);
1290  if (resource == nullptr) {
1291  return std::nullopt;
1292  }
1293  const auto vertex_type =
1294  VertexTypeFromInputResource(*compiler_.GetCompiler(), resource);
1295 
1296  auto member = StructMember{
1297  vertex_type.type_name, // type
1298  vertex_type.base_type, // base type
1299  vertex_type.variable_name, // name
1300  struc.byte_length, // offset
1301  vertex_type.byte_length, // size
1302  vertex_type.byte_length, // byte_length
1303  std::nullopt, // array_elements
1304  0, // element_padding
1305  };
1306  struc.byte_length += vertex_type.byte_length;
1307  struc.members.emplace_back(std::move(member));
1308  }
1309  return struc;
1310 }
1311 
1312 std::optional<std::string> Reflector::GetMemberNameAtIndexIfExists(
1313  const spirv_cross::SPIRType& parent_type,
1314  size_t index) const {
1315  if (parent_type.type_alias != 0) {
1316  return GetMemberNameAtIndexIfExists(
1317  compiler_->get_type(parent_type.type_alias), index);
1318  }
1319 
1320  if (auto found = ir_->meta.find(parent_type.self); found != ir_->meta.end()) {
1321  const auto& members = found->second.members;
1322  if (index < members.size() && !members[index].alias.empty()) {
1323  return members[index].alias;
1324  }
1325  }
1326  return std::nullopt;
1327 }
1328 
1329 std::string Reflector::GetMemberNameAtIndex(
1330  const spirv_cross::SPIRType& parent_type,
1331  size_t index,
1332  std::string suffix) const {
1333  if (auto name = GetMemberNameAtIndexIfExists(parent_type, index);
1334  name.has_value()) {
1335  return name.value();
1336  }
1337  static std::atomic_size_t sUnnamedMembersID;
1338  std::stringstream stream;
1339  stream << "unnamed_" << sUnnamedMembersID++ << suffix;
1340  return stream.str();
1341 }
1342 
1343 std::vector<Reflector::BindPrototype> Reflector::ReflectBindPrototypes(
1344  const spirv_cross::ShaderResources& resources,
1345  spv::ExecutionModel execution_model) const {
1346  std::vector<BindPrototype> prototypes;
1347  for (const auto& uniform_buffer : resources.uniform_buffers) {
1348  auto& proto = prototypes.emplace_back(BindPrototype{});
1349  proto.return_type = "bool";
1350  proto.name = ToCamelCase(uniform_buffer.name);
1351  proto.descriptor_type = "DescriptorType::kUniformBuffer";
1352  {
1353  std::stringstream stream;
1354  stream << "Bind uniform buffer for resource named " << uniform_buffer.name
1355  << ".";
1356  proto.docstring = stream.str();
1357  }
1358  proto.args.push_back(BindPrototypeArgument{
1359  .type_name = "ResourceBinder&",
1360  .argument_name = "command",
1361  });
1362  proto.args.push_back(BindPrototypeArgument{
1363  .type_name = "BufferView",
1364  .argument_name = "view",
1365  });
1366  }
1367  for (const auto& storage_buffer : resources.storage_buffers) {
1368  auto& proto = prototypes.emplace_back(BindPrototype{});
1369  proto.return_type = "bool";
1370  proto.name = ToCamelCase(storage_buffer.name);
1371  proto.descriptor_type = "DescriptorType::kStorageBuffer";
1372  {
1373  std::stringstream stream;
1374  stream << "Bind storage buffer for resource named " << storage_buffer.name
1375  << ".";
1376  proto.docstring = stream.str();
1377  }
1378  proto.args.push_back(BindPrototypeArgument{
1379  .type_name = "ResourceBinder&",
1380  .argument_name = "command",
1381  });
1382  proto.args.push_back(BindPrototypeArgument{
1383  .type_name = "BufferView",
1384  .argument_name = "view",
1385  });
1386  }
1387  for (const auto& sampled_image : resources.sampled_images) {
1388  auto& proto = prototypes.emplace_back(BindPrototype{});
1389  proto.return_type = "bool";
1390  proto.name = ToCamelCase(sampled_image.name);
1391  proto.descriptor_type = "DescriptorType::kSampledImage";
1392  {
1393  std::stringstream stream;
1394  stream << "Bind combined image sampler for resource named "
1395  << sampled_image.name << ".";
1396  proto.docstring = stream.str();
1397  }
1398  proto.args.push_back(BindPrototypeArgument{
1399  .type_name = "ResourceBinder&",
1400  .argument_name = "command",
1401  });
1402  proto.args.push_back(BindPrototypeArgument{
1403  .type_name = "std::shared_ptr<const Texture>",
1404  .argument_name = "texture",
1405  });
1406  proto.args.push_back(BindPrototypeArgument{
1407  .type_name = "raw_ptr<const Sampler>",
1408  .argument_name = "sampler",
1409  });
1410  }
1411  for (const auto& separate_image : resources.separate_images) {
1412  auto& proto = prototypes.emplace_back(BindPrototype{});
1413  proto.return_type = "bool";
1414  proto.name = ToCamelCase(separate_image.name);
1415  proto.descriptor_type = "DescriptorType::kImage";
1416  {
1417  std::stringstream stream;
1418  stream << "Bind separate image for resource named " << separate_image.name
1419  << ".";
1420  proto.docstring = stream.str();
1421  }
1422  proto.args.push_back(BindPrototypeArgument{
1423  .type_name = "Command&",
1424  .argument_name = "command",
1425  });
1426  proto.args.push_back(BindPrototypeArgument{
1427  .type_name = "std::shared_ptr<const Texture>",
1428  .argument_name = "texture",
1429  });
1430  }
1431  for (const auto& separate_sampler : resources.separate_samplers) {
1432  auto& proto = prototypes.emplace_back(BindPrototype{});
1433  proto.return_type = "bool";
1434  proto.name = ToCamelCase(separate_sampler.name);
1435  proto.descriptor_type = "DescriptorType::kSampler";
1436  {
1437  std::stringstream stream;
1438  stream << "Bind separate sampler for resource named "
1439  << separate_sampler.name << ".";
1440  proto.docstring = stream.str();
1441  }
1442  proto.args.push_back(BindPrototypeArgument{
1443  .type_name = "Command&",
1444  .argument_name = "command",
1445  });
1446  proto.args.push_back(BindPrototypeArgument{
1447  .type_name = "std::shared_ptr<const Sampler>",
1448  .argument_name = "sampler",
1449  });
1450  }
1451  return prototypes;
1452 }
1453 
1454 nlohmann::json::array_t Reflector::EmitBindPrototypes(
1455  const spirv_cross::ShaderResources& resources,
1456  spv::ExecutionModel execution_model) const {
1457  const auto prototypes = ReflectBindPrototypes(resources, execution_model);
1458  nlohmann::json::array_t result;
1459  for (const auto& res : prototypes) {
1460  auto& item = result.emplace_back(nlohmann::json::object_t{});
1461  item["return_type"] = res.return_type;
1462  item["name"] = res.name;
1463  item["docstring"] = res.docstring;
1464  item["descriptor_type"] = res.descriptor_type;
1465  auto& args = item["args"] = nlohmann::json::array_t{};
1466  for (const auto& arg : res.args) {
1467  auto& json_arg = args.emplace_back(nlohmann::json::object_t{});
1468  json_arg["type_name"] = arg.type_name;
1469  json_arg["argument_name"] = arg.argument_name;
1470  }
1471  }
1472  return result;
1473 }
1474 
1475 } // namespace compiler
1476 } // namespace impeller
GLenum type
static const char * kVulkanUBOName
Definition: runtime_stage.h:22
Reflector(Options options, const std::shared_ptr< const spirv_cross::ParsedIR > &ir, const std::shared_ptr< fml::Mapping > &shader_data, const CompilerBackend &compiler)
Definition: reflector.cc:62
std::shared_ptr< fml::Mapping > GetReflectionJSON() const
Definition: reflector.cc:108
std::shared_ptr< fml::Mapping > GetReflectionCC() const
Definition: reflector.cc:125
std::shared_ptr< RuntimeStageData::Shader > GetRuntimeStageShaderData() const
Definition: reflector.cc:129
std::shared_ptr< ShaderBundleData > GetShaderBundleData() const
Definition: reflector.cc:134
std::shared_ptr< fml::Mapping > GetReflectionHeader() const
Definition: reflector.cc:121
int32_t value
Vector2 padding
The halo padding in source space.
static std::optional< KnownType > ReadKnownScalarType(spirv_cross::SPIRType::BaseType type)
Definition: reflector.cc:754
static std::string TypeNameWithPaddingOfSize(size_t size)
Definition: reflector.cc:743
static VertexType VertexTypeFromInputResource(const spirv_cross::Compiler &compiler, const spirv_cross::Resource *resource)
Definition: reflector.cc:1202
static std::string ToString(CompilerBackend::Type type)
Definition: reflector.cc:575
static size_t GetReflectedStructSize(const std::vector< StructMember > &members)
Get the reflected struct size. In the vast majority of the cases, this is the same as the declared st...
Definition: reflector.cc:801
static std::optional< RuntimeStageBackend > GetRuntimeStageBackend(TargetPlatform target_platform)
Definition: reflector.cc:301
static std::string ExecutionModelToString(spv::ExecutionModel model)
Definition: reflector.cc:33
static std::string StringToShaderStage(const std::string &str)
Definition: reflector.cc:46
constexpr std::string_view kReflectionHeaderTemplate
std::string ToCamelCase(std::string_view string)
Definition: utilities.cc:39
constexpr std::string_view kReflectionCCTemplate
bool StringStartsWith(const std::string &target, const std::string &prefix)
Definition: utilities.cc:87
std::vector< spirv_cross::ID > SortUniforms(const spirv_cross::ParsedIR *ir, const spirv_cross::Compiler *compiler, std::optional< spirv_cross::SPIRType::BaseType > type_filter, bool include)
Sorts uniform declarations in an IR according to decoration order.
float Scalar
Definition: scalar.h:19
TPoint< Scalar > Point
Definition: point.h:327
TPoint< int32_t > IPoint32
Definition: point.h:329
std::string SPrintF(const char *format,...)
Definition: strings.cc:12
TPoint< uint32_t > UintPoint32
Definition: point.h:330
Definition: comparable.h:95
A storage only class for half precision floating point.
Definition: half.h:41
spirv_cross::Compiler * GetCompiler()
uint32_t GetExtendedMSLResourceBinding(ExtendedResourceIndex index, spirv_cross::ID id) const
static std::string BaseTypeToString(spirv_cross::SPIRType::BaseType type)
Definition: reflector.h:44
spirv_cross::SPIRType::BaseType base_type
Definition: reflector.cc:1197
std::shared_ptr< const fml::Mapping > data
Definition: texture_gles.cc:68
#define VALIDATION_LOG
Definition: validation.h:91