/*
 * Copyright 2018 Arm Limited
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "spirv_parser.hpp"
#include <assert.h>

using namespace std;
using namespace spv;

namespace spirv_cross
{
Parser::Parser(std::vector<uint32_t> spirv)
{
	ir.spirv = move(spirv);
}

Parser::Parser(const uint32_t *spirv_data, size_t word_count)
{
	ir.spirv = vector<uint32_t>(spirv_data, spirv_data + word_count);
}

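// Decorations which carry a string payload (currently only the HLSL semantic decoration
// from SPV_GOOGLE_hlsl_functionality1) are stored separately from the integer decoration words.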
static bool decoration_is_string(Decoration decoration)
{
	switch (decoration)
	{
	case DecorationHlslSemanticGOOGLE:
		return true;

	default:
		return false;
	}
}

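// SPIR-V modules may be serialized in either byte order; the magic number is used below to
// detect a foreign byte order, in which case the whole word stream is swapped.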
static inline uint32_t swap_endian(uint32_t v)
{
	return ((v >> 24) & 0x000000ffu) | ((v >> 8) & 0x0000ff00u) | ((v << 8) & 0x00ff0000u) | ((v << 24) & 0xff000000u);
}

static bool is_valid_spirv_version(uint32_t version)
{
	switch (version)
	{
	// Allow v99 since it tends to just work.
	case 99:
	case 0x10000: // SPIR-V 1.0
	case 0x10100: // SPIR-V 1.1
	case 0x10200: // SPIR-V 1.2
	case 0x10300: // SPIR-V 1.3
		return true;

	default:
		return false;
	}
}

void Parser::parse()
{
	auto &spirv = ir.spirv;

	auto len = spirv.size();
	if (len < 5)
		SPIRV_CROSS_THROW("SPIRV file too small.");

	auto s = spirv.data();

	// Endian-swap if we need to.
	if (s[0] == swap_endian(MagicNumber))
		transform(begin(spirv), end(spirv), begin(spirv), [](uint32_t c) { return swap_endian(c); });

	if (s[0] != MagicNumber || !is_valid_spirv_version(s[1]))
		SPIRV_CROSS_THROW("Invalid SPIRV format.");

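	// The module header is five words: magic, version, generator, ID bound and schema.
	// Word 3 is the ID bound; every result ID in the module is strictly less than this value.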
	uint32_t bound = s[3];
	ir.set_id_bounds(bound);

	uint32_t offset = 5;

	vector<Instruction> instructions;
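	// The first word of each instruction packs the total word count in the high 16 bits
	// and the opcode in the low 16 bits; the operands follow in the remaining words.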
	while (offset < len)
	{
		Instruction instr = {};
		instr.op = spirv[offset] & 0xffff;
		instr.count = (spirv[offset] >> 16) & 0xffff;

		if (instr.count == 0)
			SPIRV_CROSS_THROW("SPIR-V instructions cannot consume 0 words. Invalid SPIR-V file.");

		instr.offset = offset + 1;
		instr.length = instr.count - 1;

		offset += instr.count;

		if (offset > spirv.size())
			SPIRV_CROSS_THROW("SPIR-V instruction goes out of bounds.");

		instructions.push_back(instr);
	}

	for (auto &i : instructions)
		parse(i);

	if (current_function)
		SPIRV_CROSS_THROW("Function was not terminated.");
	if (current_block)
		SPIRV_CROSS_THROW("Block was not terminated.");
}

const uint32_t *Parser::stream(const Instruction &instr) const
{
	// If we're not going to use any arguments, just return nullptr.
	// We want to avoid the case where we return an out-of-range pointer
	// that trips debug assertions on some platforms.
	if (!instr.length)
		return nullptr;

	if (instr.offset + instr.length > ir.spirv.size())
		SPIRV_CROSS_THROW("Compiler::stream() out of range.");
	return &ir.spirv[instr.offset];
}

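// SPIR-V string literals pack four bytes per word, little-endian, and end at the first nul byte.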
static string extract_string(const vector<uint32_t> &spirv, uint32_t offset)
{
	string ret;
	for (uint32_t i = offset; i < spirv.size(); i++)
	{
		uint32_t w = spirv[i];

		for (uint32_t j = 0; j < 4; j++, w >>= 8)
		{
			char c = w & 0xff;
			if (c == '\0')
				return ret;
			ret += c;
		}
	}

	SPIRV_CROSS_THROW("String was not terminated before EOF");
}

void Parser::parse(const Instruction &instruction)
{
	auto *ops = stream(instruction);
	auto op = static_cast<Op>(instruction.op);
	uint32_t length = instruction.length;

	switch (op)
	{
	case OpMemoryModel:
	case OpSourceContinued:
	case OpSourceExtension:
	case OpNop:
	case OpLine:
	case OpNoLine:
	case OpString:
	case OpModuleProcessed:
		break;

	case OpSource:
	{
		auto lang = static_cast<SourceLanguage>(ops[0]);
		switch (lang)
		{
		case SourceLanguageESSL:
			ir.source.es = true;
			ir.source.version = ops[1];
			ir.source.known = true;
			ir.source.hlsl = false;
			break;

		case SourceLanguageGLSL:
			ir.source.es = false;
			ir.source.version = ops[1];
			ir.source.known = true;
			ir.source.hlsl = false;
			break;

		case SourceLanguageHLSL:
			// For purposes of cross-compiling, this is GLSL 450.
			ir.source.es = false;
			ir.source.version = 450;
			ir.source.known = true;
			ir.source.hlsl = true;
			break;

		default:
			ir.source.known = false;
			break;
		}
		break;
	}

	case OpUndef:
	{
		uint32_t result_type = ops[0];
		uint32_t id = ops[1];
		set<SPIRUndef>(id, result_type);
		break;
	}

	case OpCapability:
	{
		uint32_t cap = ops[0];
		if (cap == CapabilityKernel)
			SPIRV_CROSS_THROW("Kernel capability not supported.");

		ir.declared_capabilities.push_back(static_cast<Capability>(ops[0]));
		break;
	}

	case OpExtension:
	{
		auto ext = extract_string(ir.spirv, instruction.offset);
		ir.declared_extensions.push_back(move(ext));
		break;
	}

	case OpExtInstImport:
	{
		uint32_t id = ops[0];
		auto ext = extract_string(ir.spirv, instruction.offset + 1);
		if (ext == "GLSL.std.450")
			set<SPIRExtension>(id, SPIRExtension::GLSL);
		else if (ext == "SPV_AMD_shader_ballot")
			set<SPIRExtension>(id, SPIRExtension::SPV_AMD_shader_ballot);
		else if (ext == "SPV_AMD_shader_explicit_vertex_parameter")
			set<SPIRExtension>(id, SPIRExtension::SPV_AMD_shader_explicit_vertex_parameter);
		else if (ext == "SPV_AMD_shader_trinary_minmax")
			set<SPIRExtension>(id, SPIRExtension::SPV_AMD_shader_trinary_minmax);
		else if (ext == "SPV_AMD_gcn_shader")
			set<SPIRExtension>(id, SPIRExtension::SPV_AMD_gcn_shader);
		else
			set<SPIRExtension>(id, SPIRExtension::Unsupported);

		// Other SPIR-V extensions which have ExtInstrs are currently not supported.

		break;
	}

	case OpEntryPoint:
	{
		auto itr =
		    ir.entry_points.insert(make_pair(ops[1], SPIREntryPoint(ops[1], static_cast<ExecutionModel>(ops[0]),
		                                                            extract_string(ir.spirv, instruction.offset + 2))));
		auto &e = itr.first->second;

		// Strings need a nul-terminator and consume whole words.
		uint32_t strlen_words = uint32_t((e.name.size() + 1 + 3) >> 2);
		e.interface_variables.insert(end(e.interface_variables), ops + strlen_words + 2, ops + instruction.length);

		// Set the name of the entry point in case OpName is not provided later.
		ir.set_name(ops[1], e.name);

		// If we don't have an entry, make the first one our "default".
		if (!ir.default_entry_point)
			ir.default_entry_point = ops[1];
		break;
	}

	case OpExecutionMode:
	{
		auto &execution = ir.entry_points[ops[0]];
		auto mode = static_cast<ExecutionMode>(ops[1]);
		execution.flags.set(mode);

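		// Only a few execution modes carry operands which the parser records explicitly;
		// the rest are tracked through the flag bitset alone.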
		switch (mode)
		{
		case ExecutionModeInvocations:
			execution.invocations = ops[2];
			break;

		case ExecutionModeLocalSize:
			execution.workgroup_size.x = ops[2];
			execution.workgroup_size.y = ops[3];
			execution.workgroup_size.z = ops[4];
			break;

		case ExecutionModeOutputVertices:
			execution.output_vertices = ops[2];
			break;

		default:
			break;
		}
		break;
	}

	case OpName:
	{
		uint32_t id = ops[0];
		ir.set_name(id, extract_string(ir.spirv, instruction.offset + 1));
		break;
	}

	case OpMemberName:
	{
		uint32_t id = ops[0];
		uint32_t member = ops[1];
		ir.set_member_name(id, member, extract_string(ir.spirv, instruction.offset + 2));
		break;
	}

	case OpDecorationGroup:
	{
		// Noop, this simply means an ID should be a collector of decorations.
		// The meta array is already a flat array of decorations which will contain the relevant decorations.
		break;
	}

	case OpGroupDecorate:
	{
		uint32_t group_id = ops[0];
		auto &decorations = ir.meta[group_id].decoration;
		auto &flags = decorations.decoration_flags;

		// Copies decorations from one ID to another. Only copy decorations which are set in the group,
		// i.e., we cannot just copy the meta structure directly.
		for (uint32_t i = 1; i < length; i++)
		{
			uint32_t target = ops[i];
			flags.for_each_bit([&](uint32_t bit) {
				auto decoration = static_cast<Decoration>(bit);

				if (decoration_is_string(decoration))
				{
					ir.set_decoration_string(target, decoration, ir.get_decoration_string(group_id, decoration));
				}
				else
				{
					ir.meta[target].decoration_word_offset[decoration] =
					    ir.meta[group_id].decoration_word_offset[decoration];
					ir.set_decoration(target, decoration, ir.get_decoration(group_id, decoration));
				}
			});
		}
		break;
	}

	case OpGroupMemberDecorate:
	{
		uint32_t group_id = ops[0];
		auto &flags = ir.meta[group_id].decoration.decoration_flags;

		// Copies decorations from one ID to another. Only copy decorations which are set in the group,
		// i.e., we cannot just copy the meta structure directly.
		for (uint32_t i = 1; i + 1 < length; i += 2)
		{
			uint32_t target = ops[i + 0];
			uint32_t index = ops[i + 1];
			flags.for_each_bit([&](uint32_t bit) {
				auto decoration = static_cast<Decoration>(bit);

				if (decoration_is_string(decoration))
					ir.set_member_decoration_string(target, index, decoration,
					                                ir.get_decoration_string(group_id, decoration));
				else
					ir.set_member_decoration(target, index, decoration, ir.get_decoration(group_id, decoration));
			});
		}
		break;
	}

	case OpDecorate:
	case OpDecorateId:
	{
		// OpDecorateId technically supports an array of arguments, but our only supported decorations are single uint,
		// so merge decorate and decorate-id here.
		uint32_t id = ops[0];

		auto decoration = static_cast<Decoration>(ops[1]);
		if (length >= 3)
		{
			ir.meta[id].decoration_word_offset[decoration] = uint32_t(&ops[2] - ir.spirv.data());
			ir.set_decoration(id, decoration, ops[2]);
		}
		else
			ir.set_decoration(id, decoration);

		break;
	}

	case OpDecorateStringGOOGLE:
	{
		uint32_t id = ops[0];
		auto decoration = static_cast<Decoration>(ops[1]);
		ir.set_decoration_string(id, decoration, extract_string(ir.spirv, instruction.offset + 2));
		break;
	}

	case OpMemberDecorate:
	{
		uint32_t id = ops[0];
		uint32_t member = ops[1];
		auto decoration = static_cast<Decoration>(ops[2]);
		if (length >= 4)
			ir.set_member_decoration(id, member, decoration, ops[3]);
		else
			ir.set_member_decoration(id, member, decoration);
		break;
	}

	case OpMemberDecorateStringGOOGLE:
	{
		uint32_t id = ops[0];
		uint32_t member = ops[1];
		auto decoration = static_cast<Decoration>(ops[2]);
		ir.set_member_decoration_string(id, member, decoration, extract_string(ir.spirv, instruction.offset + 3));
		break;
	}

	// Build up basic types.
	case OpTypeVoid:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Void;
		break;
	}

	case OpTypeBool:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Boolean;
		type.width = 1;
		break;
	}

	case OpTypeFloat:
	{
		uint32_t id = ops[0];
		uint32_t width = ops[1];
		auto &type = set<SPIRType>(id);
		if (width == 64)
			type.basetype = SPIRType::Double;
		else if (width == 32)
			type.basetype = SPIRType::Float;
		else if (width == 16)
			type.basetype = SPIRType::Half;
		else
			SPIRV_CROSS_THROW("Unrecognized bit-width of floating point type.");
		type.width = width;
		break;
	}

	case OpTypeInt:
	{
		uint32_t id = ops[0];
		uint32_t width = ops[1];
		bool signedness = ops[2];
		auto &type = set<SPIRType>(id);
		switch (width)
		{
		case 64:
			type.basetype = signedness ? SPIRType::Int64 : SPIRType::UInt64;
			break;
		case 32:
			type.basetype = signedness ? SPIRType::Int : SPIRType::UInt;
			break;
		case 16:
			type.basetype = signedness ? SPIRType::Short : SPIRType::UShort;
			break;
		case 8:
			type.basetype = signedness ? SPIRType::SByte : SPIRType::UByte;
			break;
		default:
			SPIRV_CROSS_THROW("Unrecognized bit-width of integral type.");
		}
		type.width = width;
		break;
	}

	// Build composite types by "inheriting".
	// NOTE: The self member is also copied! For pointers and array modifiers this is a good thing
	// since we can refer to decorations on pointee classes which is needed for UBO/SSBO, I/O blocks in geometry/tess etc.
	case OpTypeVector:
	{
		uint32_t id = ops[0];
		uint32_t vecsize = ops[2];

		auto &base = get<SPIRType>(ops[1]);
		auto &vecbase = set<SPIRType>(id);

		vecbase = base;
		vecbase.vecsize = vecsize;
		vecbase.self = id;
		vecbase.parent_type = ops[1];
		break;
	}

	case OpTypeMatrix:
	{
		uint32_t id = ops[0];
		uint32_t colcount = ops[2];

		auto &base = get<SPIRType>(ops[1]);
		auto &matrixbase = set<SPIRType>(id);

		matrixbase = base;
		matrixbase.columns = colcount;
		matrixbase.self = id;
		matrixbase.parent_type = ops[1];
		break;
	}

	case OpTypeArray:
	{
		uint32_t id = ops[0];
		auto &arraybase = set<SPIRType>(id);

		uint32_t tid = ops[1];
		auto &base = get<SPIRType>(tid);

		arraybase = base;
		arraybase.parent_type = tid;

		uint32_t cid = ops[2];
		ir.mark_used_as_array_length(cid);
		auto *c = maybe_get<SPIRConstant>(cid);
		bool literal = c && !c->specialization;

		arraybase.array_size_literal.push_back(literal);
		arraybase.array.push_back(literal ? c->scalar() : cid);
		// Do NOT set arraybase.self!
		break;
	}

	case OpTypeRuntimeArray:
	{
		uint32_t id = ops[0];

		auto &base = get<SPIRType>(ops[1]);
		auto &arraybase = set<SPIRType>(id);

		arraybase = base;
		arraybase.array.push_back(0);
		arraybase.array_size_literal.push_back(true);
		arraybase.parent_type = ops[1];
		// Do NOT set arraybase.self!
		break;
	}

	case OpTypeImage:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Image;
		type.image.type = ops[1];
		type.image.dim = static_cast<Dim>(ops[2]);
		type.image.depth = ops[3] == 1;
		type.image.arrayed = ops[4] != 0;
		type.image.ms = ops[5] != 0;
		type.image.sampled = ops[6];
		type.image.format = static_cast<ImageFormat>(ops[7]);
		type.image.access = (length >= 9) ? static_cast<AccessQualifier>(ops[8]) : AccessQualifierMax;

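		// The Sampled operand is 1 for images used with a sampler and 2 for storage images;
		// 0 (usage only known at run time) is not supported.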
		if (type.image.sampled == 0)
			SPIRV_CROSS_THROW("OpTypeImage Sampled parameter must not be zero.");

		break;
	}

	case OpTypeSampledImage:
	{
		uint32_t id = ops[0];
		uint32_t imagetype = ops[1];
		auto &type = set<SPIRType>(id);
		type = get<SPIRType>(imagetype);
		type.basetype = SPIRType::SampledImage;
		type.self = id;
		break;
	}

	case OpTypeSampler:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Sampler;
		break;
	}

	case OpTypePointer:
	{
		uint32_t id = ops[0];

		auto &base = get<SPIRType>(ops[2]);
		auto &ptrbase = set<SPIRType>(id);

		ptrbase = base;
		if (ptrbase.pointer)
			SPIRV_CROSS_THROW("Cannot make pointer-to-pointer type.");
		ptrbase.pointer = true;
		ptrbase.storage = static_cast<StorageClass>(ops[1]);

		if (ptrbase.storage == StorageClassAtomicCounter)
			ptrbase.basetype = SPIRType::AtomicCounter;

		ptrbase.parent_type = ops[2];

		// Do NOT set ptrbase.self!
		break;
	}

	case OpTypeStruct:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Struct;
		for (uint32_t i = 1; i < length; i++)
			type.member_types.push_back(ops[i]);

		// Check if we have seen this struct type before, with just different
		// decorations.
		//
		// Add workaround for issue #17 as well by looking at OpName for the struct
		// types, which we shouldn't normally do.
		// We should not normally have to consider type aliases like this to begin with
		// however ... glslang issues #304, #307 cover this.

		// For stripped names, never consider struct type aliasing.
		// We risk declaring the same struct multiple times, but type-punning is not allowed
		// so this is safe.
		bool consider_aliasing = !ir.get_name(type.self).empty();
		if (consider_aliasing)
		{
			for (auto &other : global_struct_cache)
			{
				if (ir.get_name(type.self) == ir.get_name(other) &&
				    types_are_logically_equivalent(type, get<SPIRType>(other)))
				{
					type.type_alias = other;
					break;
				}
			}

			if (type.type_alias == 0)
				global_struct_cache.push_back(id);
		}
		break;
	}

	case OpTypeFunction:
	{
		uint32_t id = ops[0];
		uint32_t ret = ops[1];

		auto &func = set<SPIRFunctionPrototype>(id, ret);
		for (uint32_t i = 2; i < length; i++)
			func.parameter_types.push_back(ops[i]);
		break;
	}

	// Variable declaration
	// All variables are essentially pointers with a storage qualifier.
	case OpVariable:
	{
		uint32_t type = ops[0];
		uint32_t id = ops[1];
		auto storage = static_cast<StorageClass>(ops[2]);
		uint32_t initializer = length == 4 ? ops[3] : 0;

		if (storage == StorageClassFunction)
		{
			if (!current_function)
				SPIRV_CROSS_THROW("No function currently in scope");
			current_function->add_local_variable(id);
		}

		set<SPIRVariable>(id, type, storage, initializer);

		// HLSL-based shaders don't have these decorations. Force them here, and reset them when reading/writing images.
		auto &ttype = get<SPIRType>(type);
		if (ttype.basetype == SPIRType::BaseType::Image)
		{
			ir.set_decoration(id, DecorationNonWritable);
			ir.set_decoration(id, DecorationNonReadable);
		}

		break;
	}

	// OpPhi
	// OpPhi is a fairly magical opcode.
	// It selects temporary variables based on which parent block we *came from*.
	// In high-level languages we can "de-SSA" by creating a function local, and flushing out temporaries to this
	// function-local variable to emulate SSA Phi.
	case OpPhi:
	{
		if (!current_function)
			SPIRV_CROSS_THROW("No function currently in scope");
		if (!current_block)
			SPIRV_CROSS_THROW("No block currently in scope");

		uint32_t result_type = ops[0];
		uint32_t id = ops[1];

		// Instead of a temporary, create a new function-wide temporary with this ID instead.
		auto &var = set<SPIRVariable>(id, result_type, spv::StorageClassFunction);
		var.phi_variable = true;

		current_function->add_local_variable(id);

		for (uint32_t i = 2; i + 2 <= length; i += 2)
			current_block->phi_variables.push_back({ ops[i], ops[i + 1], id });
		break;
	}

	// Constants
	case OpSpecConstant:
	case OpConstant:
	{
		uint32_t id = ops[1];
		auto &type = get<SPIRType>(ops[0]);

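		// 64-bit literals consume two operand words, low-order word first.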
		if (type.width > 32)
			set<SPIRConstant>(id, ops[0], ops[2] | (uint64_t(ops[3]) << 32), op == OpSpecConstant);
		else
			set<SPIRConstant>(id, ops[0], ops[2], op == OpSpecConstant);
		break;
	}

	case OpSpecConstantFalse:
	case OpConstantFalse:
	{
		uint32_t id = ops[1];
		set<SPIRConstant>(id, ops[0], uint32_t(0), op == OpSpecConstantFalse);
		break;
	}

	case OpSpecConstantTrue:
	case OpConstantTrue:
	{
		uint32_t id = ops[1];
		set<SPIRConstant>(id, ops[0], uint32_t(1), op == OpSpecConstantTrue);
		break;
	}

	case OpConstantNull:
	{
		uint32_t id = ops[1];
		uint32_t type = ops[0];
		make_constant_null(id, type);
		break;
	}

	case OpSpecConstantComposite:
	case OpConstantComposite:
	{
		uint32_t id = ops[1];
		uint32_t type = ops[0];

		auto &ctype = get<SPIRType>(type);

		// We can have constants which are structs and arrays.
		// In this case, our SPIRConstant will be a list of other SPIRConstant ids which we
		// can refer to.
		if (ctype.basetype == SPIRType::Struct || !ctype.array.empty())
		{
			set<SPIRConstant>(id, type, ops + 2, length - 2, op == OpSpecConstantComposite);
		}
		else
		{
			uint32_t elements = length - 2;
			if (elements > 4)
				SPIRV_CROSS_THROW("OpConstantComposite only supports 1, 2, 3 and 4 elements.");

			SPIRConstant remapped_constant_ops[4];
			const SPIRConstant *c[4];
			for (uint32_t i = 0; i < elements; i++)
			{
				// Specialization constant operations can also be part of this.
				// We do not know their value, so any attempt to query SPIRConstant later
				// will fail. We can only propagate the ID of the expression and use to_expression on it.
				auto *constant_op = maybe_get<SPIRConstantOp>(ops[2 + i]);
				if (constant_op)
				{
					if (op == OpConstantComposite)
						SPIRV_CROSS_THROW("Specialization constant operation used in OpConstantComposite.");

					remapped_constant_ops[i].make_null(get<SPIRType>(constant_op->basetype));
					remapped_constant_ops[i].self = constant_op->self;
					remapped_constant_ops[i].constant_type = constant_op->basetype;
					remapped_constant_ops[i].specialization = true;
					c[i] = &remapped_constant_ops[i];
				}
				else
					c[i] = &get<SPIRConstant>(ops[2 + i]);
			}
			set<SPIRConstant>(id, type, c, elements, op == OpSpecConstantComposite);
		}
		break;
	}

	// Functions
	case OpFunction:
	{
		uint32_t res = ops[0];
		uint32_t id = ops[1];
		// Control
		uint32_t type = ops[3];

		if (current_function)
			SPIRV_CROSS_THROW("Must end a function before starting a new one!");

		current_function = &set<SPIRFunction>(id, res, type);
		break;
	}

	case OpFunctionParameter:
	{
		uint32_t type = ops[0];
		uint32_t id = ops[1];

		if (!current_function)
			SPIRV_CROSS_THROW("Must be in a function!");

		current_function->add_parameter(type, id);
		set<SPIRVariable>(id, type, StorageClassFunction);
		break;
	}

	case OpFunctionEnd:
	{
		if (current_block)
		{
			// Very specific error message, but seems to come up quite often.
			SPIRV_CROSS_THROW(
			    "Cannot end a function before ending the current block.\n"
			    "Likely cause: If this SPIR-V was created from glslang HLSL, make sure the entry point is valid.");
		}
		current_function = nullptr;
		break;
	}

	// Blocks
	case OpLabel:
	{
		// OpLabel always starts a block.
		if (!current_function)
			SPIRV_CROSS_THROW("Blocks cannot exist outside functions!");

		uint32_t id = ops[0];

		current_function->blocks.push_back(id);
		if (!current_function->entry_block)
			current_function->entry_block = id;

		if (current_block)
			SPIRV_CROSS_THROW("Cannot start a block before ending the current block.");

		current_block = &set<SPIRBlock>(id);
		break;
	}

	// Branch instructions end blocks.
	case OpBranch:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");

		uint32_t target = ops[0];
		current_block->terminator = SPIRBlock::Direct;
		current_block->next_block = target;
		current_block = nullptr;
		break;
	}

	case OpBranchConditional:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");

		current_block->condition = ops[0];
		current_block->true_block = ops[1];
		current_block->false_block = ops[2];

		current_block->terminator = SPIRBlock::Select;
		current_block = nullptr;
		break;
	}

	case OpSwitch:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");

		if (current_block->merge == SPIRBlock::MergeNone)
			SPIRV_CROSS_THROW("Switch statement is not structured");

		current_block->terminator = SPIRBlock::MultiSelect;

		current_block->condition = ops[0];
		current_block->default_block = ops[1];

		for (uint32_t i = 2; i + 2 <= length; i += 2)
			current_block->cases.push_back({ ops[i], ops[i + 1] });

		// If we jump to the next block, make it break instead, since we're inside a switch case block at that point.
		ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT;

		current_block = nullptr;
		break;
	}

	case OpKill:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Kill;
		current_block = nullptr;
		break;
	}

	case OpReturn:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Return;
		current_block = nullptr;
		break;
	}

	case OpReturnValue:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Return;
		current_block->return_value = ops[0];
		current_block = nullptr;
		break;
	}

	case OpUnreachable:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Unreachable;
		current_block = nullptr;
		break;
	}

	case OpSelectionMerge:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to modify a non-existing block.");

		current_block->next_block = ops[0];
		current_block->merge = SPIRBlock::MergeSelection;
		ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_SELECTION_MERGE_BIT;

		if (length >= 2)
		{
			if (ops[1] & SelectionControlFlattenMask)
				current_block->hint = SPIRBlock::HintFlatten;
			else if (ops[1] & SelectionControlDontFlattenMask)
				current_block->hint = SPIRBlock::HintDontFlatten;
		}
		break;
	}

	case OpLoopMerge:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to modify a non-existing block.");

		current_block->merge_block = ops[0];
		current_block->continue_block = ops[1];
		current_block->merge = SPIRBlock::MergeLoop;

		ir.block_meta[current_block->self] |= ParsedIR::BLOCK_META_LOOP_HEADER_BIT;
		ir.block_meta[current_block->merge_block] |= ParsedIR::BLOCK_META_LOOP_MERGE_BIT;

		ir.continue_block_to_loop_header[current_block->continue_block] = current_block->self;

		// Don't add loop headers to continue blocks,
		// which would make it impossible to branch into the loop header since
		// they are treated as continues.
		if (current_block->continue_block != current_block->self)
			ir.block_meta[current_block->continue_block] |= ParsedIR::BLOCK_META_CONTINUE_BIT;

		if (length >= 3)
		{
			if (ops[2] & LoopControlUnrollMask)
				current_block->hint = SPIRBlock::HintUnroll;
			else if (ops[2] & LoopControlDontUnrollMask)
				current_block->hint = SPIRBlock::HintDontUnroll;
		}
		break;
	}

	case OpSpecConstantOp:
	{
		if (length < 3)
			SPIRV_CROSS_THROW("OpSpecConstantOp not enough arguments.");

		uint32_t result_type = ops[0];
		uint32_t id = ops[1];
		auto spec_op = static_cast<Op>(ops[2]);

		set<SPIRConstantOp>(id, result_type, spec_op, ops + 3, length - 3);
		break;
	}

	// Actual opcodes.
	default:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Currently no block to insert opcode.");

		current_block->ops.push_back(instruction);
		break;
	}
	}
}

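// Two types are "logically equivalent" if they agree on base type, width, vector/matrix
// dimensions, array sizes, image properties and, recursively, member types; decorations are
// deliberately ignored. Used by the OpTypeStruct handling above to detect aliased struct types.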
bool Parser::types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const
{
	if (a.basetype != b.basetype)
		return false;
	if (a.width != b.width)
		return false;
	if (a.vecsize != b.vecsize)
		return false;
	if (a.columns != b.columns)
		return false;
	if (a.array.size() != b.array.size())
		return false;

	size_t array_count = a.array.size();
	if (array_count && memcmp(a.array.data(), b.array.data(), array_count * sizeof(uint32_t)) != 0)
		return false;

	if (a.basetype == SPIRType::Image || a.basetype == SPIRType::SampledImage)
	{
		if (memcmp(&a.image, &b.image, sizeof(SPIRType::Image)) != 0)
			return false;
	}

	if (a.member_types.size() != b.member_types.size())
		return false;

	size_t member_types = a.member_types.size();
	for (size_t i = 0; i < member_types; i++)
	{
		if (!types_are_logically_equivalent(get<SPIRType>(a.member_types[i]), get<SPIRType>(b.member_types[i])))
			return false;
	}

	return true;
}

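// A variable's storage is considered aliased if it is an SSBO, an image or an atomic counter
// which is not marked Restrict, since writes through one name may then be visible through another.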
bool Parser::variable_storage_is_aliased(const SPIRVariable &v) const
{
	auto &type = get<SPIRType>(v.basetype);
	bool ssbo = v.storage == StorageClassStorageBuffer ||
	            ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
	bool image = type.basetype == SPIRType::Image;
	bool counter = type.basetype == SPIRType::AtomicCounter;

	bool is_restrict;
	if (ssbo)
		is_restrict = ir.get_buffer_block_flags(v).get(DecorationRestrict);
	else
		is_restrict = ir.has_decoration(v.self, DecorationRestrict);

	return !is_restrict && (ssbo || image || counter);
}

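// Builds a null (zero-initialized) constant of the given type. Arrays and structs are expanded
// recursively into composite constants whose elements are themselves null constants.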
void Parser::make_constant_null(uint32_t id, uint32_t type)
{
	auto &constant_type = get<SPIRType>(type);

	if (!constant_type.array.empty())
	{
		assert(constant_type.parent_type);
		uint32_t parent_id = ir.increase_bound_by(1);
		make_constant_null(parent_id, constant_type.parent_type);

		if (!constant_type.array_size_literal.back())
			SPIRV_CROSS_THROW("Array size of OpConstantNull must be a literal.");

		vector<uint32_t> elements(constant_type.array.back());
		for (uint32_t i = 0; i < constant_type.array.back(); i++)
			elements[i] = parent_id;
		set<SPIRConstant>(id, type, elements.data(), uint32_t(elements.size()), false);
	}
	else if (!constant_type.member_types.empty())
	{
		uint32_t member_ids = ir.increase_bound_by(uint32_t(constant_type.member_types.size()));
		vector<uint32_t> elements(constant_type.member_types.size());
		for (uint32_t i = 0; i < constant_type.member_types.size(); i++)
		{
			make_constant_null(member_ids + i, constant_type.member_types[i]);
			elements[i] = member_ids + i;
		}
		set<SPIRConstant>(id, type, elements.data(), uint32_t(elements.size()), false);
	}
	else
	{
		auto &constant = set<SPIRConstant>(id, type);
		constant.make_null(constant_type);
	}
}

} // namespace spirv_cross