/*
 * Copyright 2018-2019 Arm Limited
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "spirv_parser.hpp"
#include <assert.h>

using namespace std;
using namespace spv;

namespace spirv_cross
{
Parser::Parser(std::vector<uint32_t> spirv)
{
	ir.spirv = move(spirv);
}

Parser::Parser(const uint32_t *spirv_data, size_t word_count)
{
	ir.spirv = vector<uint32_t>(spirv_data, spirv_data + word_count);
}

static bool decoration_is_string(Decoration decoration)
{
	switch (decoration)
	{
	case DecorationHlslSemanticGOOGLE:
		return true;

	default:
		return false;
	}
}

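// Byte-swaps one 32-bit word. SPIR-V modules may be serialized with either endianness,
// so parse() flips every word when the magic number comes out reversed.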
static inline uint32_t swap_endian(uint32_t v)
{
	return ((v >> 24) & 0x000000ffu) | ((v >> 8) & 0x0000ff00u) | ((v << 8) & 0x00ff0000u) | ((v << 24) & 0xff000000u);
}

static bool is_valid_spirv_version(uint32_t version)
{
	switch (version)
	{
	// Allow v99 since it tends to just work.
	case 99:
	case 0x10000: // SPIR-V 1.0
	case 0x10100: // SPIR-V 1.1
	case 0x10200: // SPIR-V 1.2
	case 0x10300: // SPIR-V 1.3
		return true;

	default:
		return false;
	}
}

void Parser::parse()
{
	auto &spirv = ir.spirv;

	auto len = spirv.size();
	if (len < 5)
		SPIRV_CROSS_THROW("SPIRV file too small.");

	auto s = spirv.data();

	// Endian-swap if we need to.
	if (s[0] == swap_endian(MagicNumber))
		transform(begin(spirv), end(spirv), begin(spirv), [](uint32_t c) { return swap_endian(c); });

	if (s[0] != MagicNumber || !is_valid_spirv_version(s[1]))
		SPIRV_CROSS_THROW("Invalid SPIRV format.");

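	// The module header is five words: magic number, version, generator magic, ID bound and a reserved schema word.
	// Every result ID in the module is smaller than the bound in word 3.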
	uint32_t bound = s[3];
	ir.set_id_bounds(bound);

	uint32_t offset = 5;

	vector<Instruction> instructions;
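	// Each instruction begins with a word that packs the total word count (high 16 bits,
	// including this word) and the opcode (low 16 bits); operands follow in the remaining words.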
	while (offset < len)
	{
		Instruction instr = {};
		instr.op = spirv[offset] & 0xffff;
		instr.count = (spirv[offset] >> 16) & 0xffff;

		if (instr.count == 0)
			SPIRV_CROSS_THROW("SPIR-V instructions cannot consume 0 words. Invalid SPIR-V file.");

		instr.offset = offset + 1;
		instr.length = instr.count - 1;

		offset += instr.count;

		if (offset > spirv.size())
			SPIRV_CROSS_THROW("SPIR-V instruction goes out of bounds.");

		instructions.push_back(instr);
	}

	for (auto &i : instructions)
		parse(i);

	if (current_function)
		SPIRV_CROSS_THROW("Function was not terminated.");
	if (current_block)
		SPIRV_CROSS_THROW("Block was not terminated.");
}

const uint32_t *Parser::stream(const Instruction &instr) const
{
	// If we're not going to use any arguments, just return nullptr.
	// We want to avoid the case where we return an out-of-range pointer
	// that trips debug assertions on some platforms.
	if (!instr.length)
		return nullptr;

	if (instr.offset + instr.length > ir.spirv.size())
		SPIRV_CROSS_THROW("Compiler::stream() out of range.");
	return &ir.spirv[instr.offset];
}

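// Reads a nul-terminated literal string starting at the given word offset.
// SPIR-V packs strings four bytes per word, lowest byte first, padded with nul bytes.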
static string extract_string(const vector<uint32_t> &spirv, uint32_t offset)
{
	string ret;
	for (uint32_t i = offset; i < spirv.size(); i++)
	{
		uint32_t w = spirv[i];

		for (uint32_t j = 0; j < 4; j++, w >>= 8)
		{
			char c = w & 0xff;
			if (c == '\0')
				return ret;
			ret += c;
		}
	}

	SPIRV_CROSS_THROW("String was not terminated before EOF");
}

void Parser::parse(const Instruction &instruction)
{
	auto *ops = stream(instruction);
	auto op = static_cast<Op>(instruction.op);
	uint32_t length = instruction.length;

	switch (op)
	{
	case OpMemoryModel:
	case OpSourceContinued:
	case OpSourceExtension:
	case OpNop:
	case OpLine:
	case OpNoLine:
	case OpString:
	case OpModuleProcessed:
		break;

	case OpSource:
	{
		auto lang = static_cast<SourceLanguage>(ops[0]);
		switch (lang)
		{
		case SourceLanguageESSL:
			ir.source.es = true;
			ir.source.version = ops[1];
			ir.source.known = true;
			ir.source.hlsl = false;
			break;

		case SourceLanguageGLSL:
			ir.source.es = false;
			ir.source.version = ops[1];
			ir.source.known = true;
			ir.source.hlsl = false;
			break;

		case SourceLanguageHLSL:
			// For purposes of cross-compiling, this is GLSL 450.
			ir.source.es = false;
			ir.source.version = 450;
			ir.source.known = true;
			ir.source.hlsl = true;
			break;

		default:
			ir.source.known = false;
			break;
		}
		break;
	}

	case OpUndef:
	{
		uint32_t result_type = ops[0];
		uint32_t id = ops[1];
		set<SPIRUndef>(id, result_type);
		break;
	}

	case OpCapability:
	{
		uint32_t cap = ops[0];
		if (cap == CapabilityKernel)
			SPIRV_CROSS_THROW("Kernel capability not supported.");

		ir.declared_capabilities.push_back(static_cast<Capability>(ops[0]));
		break;
	}

	case OpExtension:
	{
		auto ext = extract_string(ir.spirv, instruction.offset);
		ir.declared_extensions.push_back(move(ext));
		break;
	}

	case OpExtInstImport:
	{
		uint32_t id = ops[0];
		auto ext = extract_string(ir.spirv, instruction.offset + 1);
		if (ext == "GLSL.std.450")
			set<SPIRExtension>(id, SPIRExtension::GLSL);
		else if (ext == "SPV_AMD_shader_ballot")
			set<SPIRExtension>(id, SPIRExtension::SPV_AMD_shader_ballot);
		else if (ext == "SPV_AMD_shader_explicit_vertex_parameter")
			set<SPIRExtension>(id, SPIRExtension::SPV_AMD_shader_explicit_vertex_parameter);
		else if (ext == "SPV_AMD_shader_trinary_minmax")
			set<SPIRExtension>(id, SPIRExtension::SPV_AMD_shader_trinary_minmax);
		else if (ext == "SPV_AMD_gcn_shader")
			set<SPIRExtension>(id, SPIRExtension::SPV_AMD_gcn_shader);
		else
			set<SPIRExtension>(id, SPIRExtension::Unsupported);

		// Other SPIR-V extensions which have ExtInstrs are currently not supported.

		break;
	}

	case OpEntryPoint:
	{
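		// Operand layout: execution model, entry point ID, literal name, then the interface variable IDs.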
		auto itr =
		    ir.entry_points.insert(make_pair(ops[1], SPIREntryPoint(ops[1], static_cast<ExecutionModel>(ops[0]),
		                                                            extract_string(ir.spirv, instruction.offset + 2))));
		auto &e = itr.first->second;

		// Strings need nul-terminator and consume the whole word.
		uint32_t strlen_words = uint32_t((e.name.size() + 1 + 3) >> 2);
		e.interface_variables.insert(end(e.interface_variables), ops + strlen_words + 2, ops + instruction.length);

		// Set the name of the entry point in case OpName is not provided later.
		ir.set_name(ops[1], e.name);

		// If we don't have an entry, make the first one our "default".
		if (!ir.default_entry_point)
			ir.default_entry_point = ops[1];
		break;
	}

	case OpExecutionMode:
	{
		auto &execution = ir.entry_points[ops[0]];
		auto mode = static_cast<ExecutionMode>(ops[1]);
		execution.flags.set(mode);

		switch (mode)
		{
		case ExecutionModeInvocations:
			execution.invocations = ops[2];
			break;

		case ExecutionModeLocalSize:
			execution.workgroup_size.x = ops[2];
			execution.workgroup_size.y = ops[3];
			execution.workgroup_size.z = ops[4];
			break;

		case ExecutionModeOutputVertices:
			execution.output_vertices = ops[2];
			break;

		default:
			break;
		}
		break;
	}

	case OpName:
	{
		uint32_t id = ops[0];
		ir.set_name(id, extract_string(ir.spirv, instruction.offset + 1));
		break;
	}

	case OpMemberName:
	{
		uint32_t id = ops[0];
		uint32_t member = ops[1];
		ir.set_member_name(id, member, extract_string(ir.spirv, instruction.offset + 2));
		break;
	}

	case OpDecorationGroup:
	{
		// Noop, this simply means an ID should be a collector of decorations.
		// The meta array is already a flat array of decorations which will contain the relevant decorations.
		break;
	}

	case OpGroupDecorate:
	{
		uint32_t group_id = ops[0];
		auto &decorations = ir.meta[group_id].decoration;
		auto &flags = decorations.decoration_flags;

		// Copies decorations from one ID to another. Only copy decorations which are set in the group,
		// i.e., we cannot just copy the meta structure directly.
		for (uint32_t i = 1; i < length; i++)
		{
			uint32_t target = ops[i];
			flags.for_each_bit([&](uint32_t bit) {
				auto decoration = static_cast<Decoration>(bit);

				if (decoration_is_string(decoration))
				{
					ir.set_decoration_string(target, decoration, ir.get_decoration_string(group_id, decoration));
				}
				else
				{
					ir.meta[target].decoration_word_offset[decoration] =
					    ir.meta[group_id].decoration_word_offset[decoration];
					ir.set_decoration(target, decoration, ir.get_decoration(group_id, decoration));
				}
			});
		}
		break;
	}

	case OpGroupMemberDecorate:
	{
		uint32_t group_id = ops[0];
		auto &flags = ir.meta[group_id].decoration.decoration_flags;

		// Copies decorations from one ID to another. Only copy decorations which are set in the group,
		// i.e., we cannot just copy the meta structure directly.
		for (uint32_t i = 1; i + 1 < length; i += 2)
		{
			uint32_t target = ops[i + 0];
			uint32_t index = ops[i + 1];
			flags.for_each_bit([&](uint32_t bit) {
				auto decoration = static_cast<Decoration>(bit);

				if (decoration_is_string(decoration))
					ir.set_member_decoration_string(target, index, decoration,
					                                ir.get_decoration_string(group_id, decoration));
				else
					ir.set_member_decoration(target, index, decoration, ir.get_decoration(group_id, decoration));
			});
		}
		break;
	}

	case OpDecorate:
	case OpDecorateId:
	{
		// OpDecorateId technically supports an array of arguments, but our only supported decorations are single uint,
		// so merge decorate and decorate-id here.
		uint32_t id = ops[0];

		auto decoration = static_cast<Decoration>(ops[1]);
		if (length >= 3)
		{
			ir.meta[id].decoration_word_offset[decoration] = uint32_t(&ops[2] - ir.spirv.data());
			ir.set_decoration(id, decoration, ops[2]);
		}
		else
			ir.set_decoration(id, decoration);

		break;
	}

	case OpDecorateStringGOOGLE:
	{
		uint32_t id = ops[0];
		auto decoration = static_cast<Decoration>(ops[1]);
		ir.set_decoration_string(id, decoration, extract_string(ir.spirv, instruction.offset + 2));
		break;
	}

	case OpMemberDecorate:
	{
		uint32_t id = ops[0];
		uint32_t member = ops[1];
		auto decoration = static_cast<Decoration>(ops[2]);
		if (length >= 4)
			ir.set_member_decoration(id, member, decoration, ops[3]);
		else
			ir.set_member_decoration(id, member, decoration);
		break;
	}

	case OpMemberDecorateStringGOOGLE:
	{
		uint32_t id = ops[0];
		uint32_t member = ops[1];
		auto decoration = static_cast<Decoration>(ops[2]);
		ir.set_member_decoration_string(id, member, decoration, extract_string(ir.spirv, instruction.offset + 3));
		break;
	}

	// Build up basic types.
	case OpTypeVoid:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Void;
		break;
	}

	case OpTypeBool:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Boolean;
		type.width = 1;
		break;
	}

	case OpTypeFloat:
	{
		uint32_t id = ops[0];
		uint32_t width = ops[1];
		auto &type = set<SPIRType>(id);
		if (width == 64)
			type.basetype = SPIRType::Double;
		else if (width == 32)
			type.basetype = SPIRType::Float;
		else if (width == 16)
			type.basetype = SPIRType::Half;
		else
			SPIRV_CROSS_THROW("Unrecognized bit-width of floating point type.");
		type.width = width;
		break;
	}

	case OpTypeInt:
	{
		uint32_t id = ops[0];
		uint32_t width = ops[1];
		bool signedness = ops[2] != 0;
		auto &type = set<SPIRType>(id);
		switch (width)
		{
		case 64:
			type.basetype = signedness ? SPIRType::Int64 : SPIRType::UInt64;
			break;
		case 32:
			type.basetype = signedness ? SPIRType::Int : SPIRType::UInt;
			break;
		case 16:
			type.basetype = signedness ? SPIRType::Short : SPIRType::UShort;
			break;
		case 8:
			type.basetype = signedness ? SPIRType::SByte : SPIRType::UByte;
			break;
		default:
			SPIRV_CROSS_THROW("Unrecognized bit-width of integral type.");
		}
		type.width = width;
		break;
	}

	// Build composite types by "inheriting".
	// NOTE: The self member is also copied! For pointers and array modifiers this is a good thing
	// since we can refer to decorations on pointee classes which is needed for UBO/SSBO, I/O blocks in geometry/tess etc.
	case OpTypeVector:
	{
		uint32_t id = ops[0];
		uint32_t vecsize = ops[2];

		auto &base = get<SPIRType>(ops[1]);
		auto &vecbase = set<SPIRType>(id);

		vecbase = base;
		vecbase.vecsize = vecsize;
		vecbase.self = id;
		vecbase.parent_type = ops[1];
		break;
	}

	case OpTypeMatrix:
	{
		uint32_t id = ops[0];
		uint32_t colcount = ops[2];

		auto &base = get<SPIRType>(ops[1]);
		auto &matrixbase = set<SPIRType>(id);

		matrixbase = base;
		matrixbase.columns = colcount;
		matrixbase.self = id;
		matrixbase.parent_type = ops[1];
		break;
	}

	case OpTypeArray:
	{
		uint32_t id = ops[0];
		auto &arraybase = set<SPIRType>(id);

		uint32_t tid = ops[1];
		auto &base = get<SPIRType>(tid);

		arraybase = base;
		arraybase.parent_type = tid;

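		// The array length is an ID referencing a constant. If that constant is a specialization
		// constant, keep the ID rather than folding it to a literal value.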
		uint32_t cid = ops[2];
		ir.mark_used_as_array_length(cid);
		auto *c = maybe_get<SPIRConstant>(cid);
		bool literal = c && !c->specialization;

		arraybase.array_size_literal.push_back(literal);
		arraybase.array.push_back(literal ? c->scalar() : cid);
		// Do NOT set arraybase.self!
		break;
	}

	case OpTypeRuntimeArray:
	{
		uint32_t id = ops[0];

		auto &base = get<SPIRType>(ops[1]);
		auto &arraybase = set<SPIRType>(id);

		arraybase = base;
		arraybase.array.push_back(0);
		arraybase.array_size_literal.push_back(true);
		arraybase.parent_type = ops[1];
		// Do NOT set arraybase.self!
		break;
	}

	case OpTypeImage:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Image;
		type.image.type = ops[1];
		type.image.dim = static_cast<Dim>(ops[2]);
		type.image.depth = ops[3] == 1;
		type.image.arrayed = ops[4] != 0;
		type.image.ms = ops[5] != 0;
		type.image.sampled = ops[6];
		type.image.format = static_cast<ImageFormat>(ops[7]);
		type.image.access = (length >= 9) ? static_cast<AccessQualifier>(ops[8]) : AccessQualifierMax;

		if (type.image.sampled == 0)
			SPIRV_CROSS_THROW("OpTypeImage Sampled parameter must not be zero.");

		break;
	}

	case OpTypeSampledImage:
	{
		uint32_t id = ops[0];
		uint32_t imagetype = ops[1];
		auto &type = set<SPIRType>(id);
		type = get<SPIRType>(imagetype);
		type.basetype = SPIRType::SampledImage;
		type.self = id;
		break;
	}

	case OpTypeSampler:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Sampler;
		break;
	}

	case OpTypePointer:
	{
		uint32_t id = ops[0];

		auto &base = get<SPIRType>(ops[2]);
		auto &ptrbase = set<SPIRType>(id);

		ptrbase = base;
		ptrbase.pointer = true;
		ptrbase.pointer_depth++;
		ptrbase.storage = static_cast<StorageClass>(ops[1]);

		if (ptrbase.storage == StorageClassAtomicCounter)
			ptrbase.basetype = SPIRType::AtomicCounter;

		ptrbase.parent_type = ops[2];

		// Do NOT set ptrbase.self!
		break;
	}

	case OpTypeStruct:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Struct;
		for (uint32_t i = 1; i < length; i++)
			type.member_types.push_back(ops[i]);

		// Check if we have seen this struct type before, with just different
		// decorations.
		//
		// Add workaround for issue #17 as well by looking at OpName for the struct
		// types, which we shouldn't normally do.
		// We should not normally have to consider type aliases like this to begin with
		// however ... glslang issues #304, #307 cover this.

		// For stripped names, never consider struct type aliasing.
		// We risk declaring the same struct multiple times, but type-punning is not allowed
		// so this is safe.
		bool consider_aliasing = !ir.get_name(type.self).empty();
		if (consider_aliasing)
		{
			for (auto &other : global_struct_cache)
			{
				if (ir.get_name(type.self) == ir.get_name(other) &&
				    types_are_logically_equivalent(type, get<SPIRType>(other)))
				{
					type.type_alias = other;
					break;
				}
			}

			if (type.type_alias == 0)
				global_struct_cache.push_back(id);
		}
		break;
	}

	case OpTypeFunction:
	{
		uint32_t id = ops[0];
		uint32_t ret = ops[1];

		auto &func = set<SPIRFunctionPrototype>(id, ret);
		for (uint32_t i = 2; i < length; i++)
			func.parameter_types.push_back(ops[i]);
		break;
	}

	// Variable declaration
	// All variables are essentially pointers with a storage qualifier.
	case OpVariable:
	{
		uint32_t type = ops[0];
		uint32_t id = ops[1];
		auto storage = static_cast<StorageClass>(ops[2]);
		uint32_t initializer = length == 4 ? ops[3] : 0;

		if (storage == StorageClassFunction)
		{
			if (!current_function)
				SPIRV_CROSS_THROW("No function currently in scope");
			current_function->add_local_variable(id);
		}

		set<SPIRVariable>(id, type, storage, initializer);

		// HLSL-based shaders don't have these decorations; force them on here and reset them when reading/writing images.
		auto &ttype = get<SPIRType>(type);
		if (ttype.basetype == SPIRType::BaseType::Image)
		{
			ir.set_decoration(id, DecorationNonWritable);
			ir.set_decoration(id, DecorationNonReadable);
		}

		break;
	}

	// OpPhi
	// OpPhi is a fairly magical opcode.
	// It selects temporary variables based on which parent block we *came from*.
	// In high-level languages we can "de-SSA" by creating a function local, and flush out temporaries to this function-local
	// variable to emulate SSA Phi.
	case OpPhi:
	{
		if (!current_function)
			SPIRV_CROSS_THROW("No function currently in scope");
		if (!current_block)
			SPIRV_CROSS_THROW("No block currently in scope");

		uint32_t result_type = ops[0];
		uint32_t id = ops[1];

		// Instead of a temporary, create a new function-wide temporary with this ID.
		auto &var = set<SPIRVariable>(id, result_type, spv::StorageClassFunction);
		var.phi_variable = true;

		current_function->add_local_variable(id);

		for (uint32_t i = 2; i + 2 <= length; i += 2)
			current_block->phi_variables.push_back({ ops[i], ops[i + 1], id });
		break;
	}

	// Constants
	case OpSpecConstant:
	case OpConstant:
	{
		uint32_t id = ops[1];
		auto &type = get<SPIRType>(ops[0]);

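		// Constants wider than 32 bits span two operand words, low-order word first.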
		if (type.width > 32)
			set<SPIRConstant>(id, ops[0], ops[2] | (uint64_t(ops[3]) << 32), op == OpSpecConstant);
		else
			set<SPIRConstant>(id, ops[0], ops[2], op == OpSpecConstant);
		break;
	}

	case OpSpecConstantFalse:
	case OpConstantFalse:
	{
		uint32_t id = ops[1];
		set<SPIRConstant>(id, ops[0], uint32_t(0), op == OpSpecConstantFalse);
		break;
	}

	case OpSpecConstantTrue:
	case OpConstantTrue:
	{
		uint32_t id = ops[1];
		set<SPIRConstant>(id, ops[0], uint32_t(1), op == OpSpecConstantTrue);
		break;
	}

	case OpConstantNull:
	{
		uint32_t id = ops[1];
		uint32_t type = ops[0];
		make_constant_null(id, type);
		break;
	}

	case OpSpecConstantComposite:
	case OpConstantComposite:
	{
		uint32_t id = ops[1];
		uint32_t type = ops[0];

		auto &ctype = get<SPIRType>(type);

		// We can have constants which are structs and arrays.
		// In this case, our SPIRConstant will be a list of other SPIRConstant ids which we
		// can refer to.
		if (ctype.basetype == SPIRType::Struct || !ctype.array.empty())
		{
			set<SPIRConstant>(id, type, ops + 2, length - 2, op == OpSpecConstantComposite);
		}
		else
		{
			uint32_t elements = length - 2;
			if (elements > 4)
				SPIRV_CROSS_THROW("OpConstantComposite only supports 1, 2, 3 and 4 elements.");

			SPIRConstant remapped_constant_ops[4];
			const SPIRConstant *c[4];
			for (uint32_t i = 0; i < elements; i++)
			{
				// Specialization constant operations can also be part of this.
				// We do not know their value, so any attempt to query SPIRConstant later
				// will fail. We can only propagate the ID of the expression and use to_expression on it.
				auto *constant_op = maybe_get<SPIRConstantOp>(ops[2 + i]);
				if (constant_op)
				{
					if (op == OpConstantComposite)
						SPIRV_CROSS_THROW("Specialization constant operation used in OpConstantComposite.");

					remapped_constant_ops[i].make_null(get<SPIRType>(constant_op->basetype));
					remapped_constant_ops[i].self = constant_op->self;
					remapped_constant_ops[i].constant_type = constant_op->basetype;
					remapped_constant_ops[i].specialization = true;
					c[i] = &remapped_constant_ops[i];
				}
				else
					c[i] = &get<SPIRConstant>(ops[2 + i]);
			}
			set<SPIRConstant>(id, type, c, elements, op == OpSpecConstantComposite);
		}
		break;
	}

	// Functions
	case OpFunction:
	{
		uint32_t res = ops[0];
		uint32_t id = ops[1];
		// Control
		uint32_t type = ops[3];

		if (current_function)
			SPIRV_CROSS_THROW("Must end a function before starting a new one!");

		current_function = &set<SPIRFunction>(id, res, type);
		break;
	}

	case OpFunctionParameter:
	{
		uint32_t type = ops[0];
		uint32_t id = ops[1];

		if (!current_function)
			SPIRV_CROSS_THROW("Must be in a function!");

		current_function->add_parameter(type, id);
		set<SPIRVariable>(id, type, StorageClassFunction);
		break;
	}

	case OpFunctionEnd:
	{
		if (current_block)
		{
			// Very specific error message, but seems to come up quite often.
			SPIRV_CROSS_THROW(
			    "Cannot end a function before ending the current block.\n"
			    "Likely cause: If this SPIR-V was created from glslang HLSL, make sure the entry point is valid.");
		}
		current_function = nullptr;
		break;
	}

	// Blocks
	case OpLabel:
	{
		// OpLabel always starts a block.
		if (!current_function)
			SPIRV_CROSS_THROW("Blocks cannot exist outside functions!");

		uint32_t id = ops[0];

		current_function->blocks.push_back(id);
		if (!current_function->entry_block)
			current_function->entry_block = id;

		if (current_block)
			SPIRV_CROSS_THROW("Cannot start a block before ending the current block.");

		current_block = &set<SPIRBlock>(id);
		break;
	}

	// Branch instructions end blocks.
	case OpBranch:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");

		uint32_t target = ops[0];
		current_block->terminator = SPIRBlock::Direct;
		current_block->next_block = target;
		current_block = nullptr;
		break;
	}

	case OpBranchConditional:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");

		current_block->condition = ops[0];
		current_block->true_block = ops[1];
		current_block->false_block = ops[2];

		current_block->terminator = SPIRBlock::Select;
		current_block = nullptr;
		break;
	}

	case OpSwitch:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");

		if (current_block->merge == SPIRBlock::MergeNone)
			SPIRV_CROSS_THROW("Switch statement is not structured");

		current_block->terminator = SPIRBlock::MultiSelect;

		current_block->condition = ops[0];
		current_block->default_block = ops[1];

		for (uint32_t i = 2; i + 2 <= length; i += 2)
			current_block->cases.push_back({ ops[i], ops[i + 1] });

		// If we jump to the next block, make it break instead, since we're inside a switch case block at that point.
		ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT;

		current_block = nullptr;
		break;
	}

	case OpKill:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Kill;
		current_block = nullptr;
		break;
	}

	case OpReturn:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Return;
		current_block = nullptr;
		break;
	}

	case OpReturnValue:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Return;
		current_block->return_value = ops[0];
		current_block = nullptr;
		break;
	}

	case OpUnreachable:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Unreachable;
		current_block = nullptr;
		break;
	}

	case OpSelectionMerge:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to modify a non-existing block.");

		current_block->next_block = ops[0];
		current_block->merge = SPIRBlock::MergeSelection;
		ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_SELECTION_MERGE_BIT;

		if (length >= 2)
		{
			if (ops[1] & SelectionControlFlattenMask)
				current_block->hint = SPIRBlock::HintFlatten;
			else if (ops[1] & SelectionControlDontFlattenMask)
				current_block->hint = SPIRBlock::HintDontFlatten;
		}
		break;
	}

	case OpLoopMerge:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to modify a non-existing block.");

		current_block->merge_block = ops[0];
		current_block->continue_block = ops[1];
		current_block->merge = SPIRBlock::MergeLoop;

		ir.block_meta[current_block->self] |= ParsedIR::BLOCK_META_LOOP_HEADER_BIT;
		ir.block_meta[current_block->merge_block] |= ParsedIR::BLOCK_META_LOOP_MERGE_BIT;

		ir.continue_block_to_loop_header[current_block->continue_block] = current_block->self;

		// Don't add loop headers to continue blocks,
		// which would make it impossible to branch into the loop header since
		// they are treated as continues.
		if (current_block->continue_block != current_block->self)
			ir.block_meta[current_block->continue_block] |= ParsedIR::BLOCK_META_CONTINUE_BIT;

		if (length >= 3)
		{
			if (ops[2] & LoopControlUnrollMask)
				current_block->hint = SPIRBlock::HintUnroll;
			else if (ops[2] & LoopControlDontUnrollMask)
				current_block->hint = SPIRBlock::HintDontUnroll;
		}
		break;
	}

	case OpSpecConstantOp:
	{
		if (length < 3)
			SPIRV_CROSS_THROW("OpSpecConstantOp not enough arguments.");

		uint32_t result_type = ops[0];
		uint32_t id = ops[1];
		auto spec_op = static_cast<Op>(ops[2]);

		set<SPIRConstantOp>(id, result_type, spec_op, ops + 3, length - 3);
		break;
	}

	// Actual opcodes.
	default:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Currently no block to insert opcode.");

		current_block->ops.push_back(instruction);
		break;
	}
	}
}

bool Parser::types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const
{
	if (a.basetype != b.basetype)
		return false;
	if (a.width != b.width)
		return false;
	if (a.vecsize != b.vecsize)
		return false;
	if (a.columns != b.columns)
		return false;
	if (a.array.size() != b.array.size())
		return false;

	size_t array_count = a.array.size();
	if (array_count && memcmp(a.array.data(), b.array.data(), array_count * sizeof(uint32_t)) != 0)
		return false;

	if (a.basetype == SPIRType::Image || a.basetype == SPIRType::SampledImage)
	{
		if (memcmp(&a.image, &b.image, sizeof(SPIRType::Image)) != 0)
			return false;
	}

	if (a.member_types.size() != b.member_types.size())
		return false;

	size_t member_types = a.member_types.size();
	for (size_t i = 0; i < member_types; i++)
	{
		if (!types_are_logically_equivalent(get<SPIRType>(a.member_types[i]), get<SPIRType>(b.member_types[i])))
			return false;
	}

	return true;
}

bool Parser::variable_storage_is_aliased(const SPIRVariable &v) const
{
	auto &type = get<SPIRType>(v.basetype);

	auto *type_meta = ir.find_meta(type.self);

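	// Buffer blocks (SSBOs), images and atomic counters are considered aliased unless marked restrict.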
	bool ssbo = v.storage == StorageClassStorageBuffer ||
	            (type_meta && type_meta->decoration.decoration_flags.get(DecorationBufferBlock));
	bool image = type.basetype == SPIRType::Image;
	bool counter = type.basetype == SPIRType::AtomicCounter;

	bool is_restrict;
	if (ssbo)
		is_restrict = ir.get_buffer_block_flags(v).get(DecorationRestrict);
	else
		is_restrict = ir.has_decoration(v.self, DecorationRestrict);

	return !is_restrict && (ssbo || image || counter);
}

void Parser::make_constant_null(uint32_t id, uint32_t type)
{
	auto &constant_type = get<SPIRType>(type);

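	// Null constants for arrays and structs are built recursively from null constants of their element and member types.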
	if (constant_type.pointer)
	{
		auto &constant = set<SPIRConstant>(id, type);
		constant.make_null(constant_type);
	}
	else if (!constant_type.array.empty())
	{
		assert(constant_type.parent_type);
		uint32_t parent_id = ir.increase_bound_by(1);
		make_constant_null(parent_id, constant_type.parent_type);

		if (!constant_type.array_size_literal.back())
			SPIRV_CROSS_THROW("Array size of OpConstantNull must be a literal.");

		vector<uint32_t> elements(constant_type.array.back());
		for (uint32_t i = 0; i < constant_type.array.back(); i++)
			elements[i] = parent_id;
		set<SPIRConstant>(id, type, elements.data(), uint32_t(elements.size()), false);
	}
	else if (!constant_type.member_types.empty())
	{
		uint32_t member_ids = ir.increase_bound_by(uint32_t(constant_type.member_types.size()));
		vector<uint32_t> elements(constant_type.member_types.size());
		for (uint32_t i = 0; i < constant_type.member_types.size(); i++)
		{
			make_constant_null(member_ids + i, constant_type.member_types[i]);
			elements[i] = member_ids + i;
		}
		set<SPIRConstant>(id, type, elements.data(), uint32_t(elements.size()), false);
	}
	else
	{
		auto &constant = set<SPIRConstant>(id, type);
		constant.make_null(constant_type);
	}
}

} // namespace spirv_cross