/*
 * Copyright 2018-2020 Arm Limited
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "spirv_parser.hpp"
#include <assert.h>

using namespace std;
using namespace spv;

namespace SPIRV_CROSS_NAMESPACE
{
Parser::Parser(vector<uint32_t> spirv)
{
	ir.spirv = move(spirv);
}

Parser::Parser(const uint32_t *spirv_data, size_t word_count)
{
	ir.spirv = vector<uint32_t>(spirv_data, spirv_data + word_count);
}

static bool decoration_is_string(Decoration decoration)
{
	switch (decoration)
	{
	case DecorationHlslSemanticGOOGLE:
		return true;

	default:
		return false;
	}
}

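// SPIR-V modules written with the opposite endianness present the magic number
// byte-swapped (0x07230203 reads back as 0x03022307), so parse() swaps every word
// of such modules using this helper.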
static inline uint32_t swap_endian(uint32_t v)
{
	return ((v >> 24) & 0x000000ffu) | ((v >> 8) & 0x0000ff00u) | ((v << 8) & 0x00ff0000u) | ((v << 24) & 0xff000000u);
}

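// The header's version word encodes major.minor as 0x00MMmm00
// (major in byte 2, minor in byte 1), e.g. 0x10300 is SPIR-V 1.3.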
static bool is_valid_spirv_version(uint32_t version)
{
	switch (version)
	{
	// Allow v99 since it tends to just work.
	case 99:
	case 0x10000: // SPIR-V 1.0
	case 0x10100: // SPIR-V 1.1
	case 0x10200: // SPIR-V 1.2
	case 0x10300: // SPIR-V 1.3
	case 0x10400: // SPIR-V 1.4
	case 0x10500: // SPIR-V 1.5
		return true;

	default:
		return false;
	}
}

void Parser::parse()
{
	auto &spirv = ir.spirv;

	auto len = spirv.size();
	if (len < 5)
		SPIRV_CROSS_THROW("SPIRV file too small.");

	auto s = spirv.data();

	// Endian-swap if we need to.
	if (s[0] == swap_endian(MagicNumber))
		transform(begin(spirv), end(spirv), begin(spirv), [](uint32_t c) { return swap_endian(c); });

	if (s[0] != MagicNumber || !is_valid_spirv_version(s[1]))
		SPIRV_CROSS_THROW("Invalid SPIRV format.");

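	// The 5-word SPIR-V module header is laid out as:
	//   word 0: magic number, word 1: version, word 2: generator ID,
	//   word 3: ID bound (all result IDs are < bound), word 4: reserved schema (0).
	// Instructions start at word 5.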
	uint32_t bound = s[3];

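	// 0x3fffff (4,194,303) is the universal upper bound on the ID bound in the
	// SPIR-V limits, so anything claiming more cannot be a sane module.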
	const uint32_t MaximumNumberOfIDs = 0x3fffff;
	if (bound > MaximumNumberOfIDs)
		SPIRV_CROSS_THROW("ID bound exceeds limit of 0x3fffff.\n");

	ir.set_id_bounds(bound);

	uint32_t offset = 5;

	SmallVector<Instruction> instructions;
	while (offset < len)
	{
		Instruction instr = {};
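		// The first word of every instruction packs the total word count in the
		// high 16 bits and the opcode in the low 16 bits; e.g. 0x00040020 is a
		// 4-word OpTypePointer (opcode 32).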
		instr.op = spirv[offset] & 0xffff;
		instr.count = (spirv[offset] >> 16) & 0xffff;

		if (instr.count == 0)
			SPIRV_CROSS_THROW("SPIR-V instructions cannot consume 0 words. Invalid SPIR-V file.");

		instr.offset = offset + 1;
		instr.length = instr.count - 1;

		offset += instr.count;

		if (offset > spirv.size())
			SPIRV_CROSS_THROW("SPIR-V instruction goes out of bounds.");

		instructions.push_back(instr);
	}

	for (auto &i : instructions)
		parse(i);

	if (current_function)
		SPIRV_CROSS_THROW("Function was not terminated.");
	if (current_block)
		SPIRV_CROSS_THROW("Block was not terminated.");
}

const uint32_t *Parser::stream(const Instruction &instr) const
{
	// If we're not going to use any arguments, just return nullptr.
	// We want to avoid the case where we return an out-of-range pointer
	// that trips debug assertions on some platforms.
	if (!instr.length)
		return nullptr;

	if (instr.offset + instr.length > ir.spirv.size())
		SPIRV_CROSS_THROW("Compiler::stream() out of range.");
	return &ir.spirv[instr.offset];
}

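// SPIR-V string literals are nul-terminated UTF-8 strings packed four bytes per
// word, little-endian within each word; e.g. "main" occupies two words:
// 0x6e69616d ('m','a','i','n') followed by 0x00000000 for the terminator.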
static string extract_string(const vector<uint32_t> &spirv, uint32_t offset)
{
	string ret;
	for (uint32_t i = offset; i < spirv.size(); i++)
	{
		uint32_t w = spirv[i];

		for (uint32_t j = 0; j < 4; j++, w >>= 8)
		{
			char c = w & 0xff;
			if (c == '\0')
				return ret;
			ret += c;
		}
	}

	SPIRV_CROSS_THROW("String was not terminated before EOF");
}

void Parser::parse(const Instruction &instruction)
{
	auto *ops = stream(instruction);
	auto op = static_cast<Op>(instruction.op);
	uint32_t length = instruction.length;

	switch (op)
	{
	case OpSourceContinued:
	case OpSourceExtension:
	case OpNop:
	case OpModuleProcessed:
		break;

	case OpString:
	{
		set<SPIRString>(ops[0], extract_string(ir.spirv, instruction.offset + 1));
		break;
	}

	case OpMemoryModel:
		ir.addressing_model = static_cast<AddressingModel>(ops[0]);
		ir.memory_model = static_cast<MemoryModel>(ops[1]);
		break;

	case OpSource:
	{
		auto lang = static_cast<SourceLanguage>(ops[0]);
		switch (lang)
		{
		case SourceLanguageESSL:
			ir.source.es = true;
			ir.source.version = ops[1];
			ir.source.known = true;
			ir.source.hlsl = false;
			break;

		case SourceLanguageGLSL:
			ir.source.es = false;
			ir.source.version = ops[1];
			ir.source.known = true;
			ir.source.hlsl = false;
			break;

		case SourceLanguageHLSL:
			// For purposes of cross-compiling, this is GLSL 450.
			ir.source.es = false;
			ir.source.version = 450;
			ir.source.known = true;
			ir.source.hlsl = true;
			break;

		default:
			ir.source.known = false;
			break;
		}
		break;
	}

	case OpUndef:
	{
		uint32_t result_type = ops[0];
		uint32_t id = ops[1];
		set<SPIRUndef>(id, result_type);
		if (current_block)
			current_block->ops.push_back(instruction);
		break;
	}

	case OpCapability:
	{
		uint32_t cap = ops[0];
		if (cap == CapabilityKernel)
			SPIRV_CROSS_THROW("Kernel capability not supported.");

		ir.declared_capabilities.push_back(static_cast<Capability>(ops[0]));
		break;
	}

	case OpExtension:
	{
		auto ext = extract_string(ir.spirv, instruction.offset);
		ir.declared_extensions.push_back(move(ext));
		break;
	}

	case OpExtInstImport:
	{
		uint32_t id = ops[0];
		auto ext = extract_string(ir.spirv, instruction.offset + 1);
		if (ext == "GLSL.std.450")
			set<SPIRExtension>(id, SPIRExtension::GLSL);
		else if (ext == "DebugInfo")
			set<SPIRExtension>(id, SPIRExtension::SPV_debug_info);
		else if (ext == "SPV_AMD_shader_ballot")
			set<SPIRExtension>(id, SPIRExtension::SPV_AMD_shader_ballot);
		else if (ext == "SPV_AMD_shader_explicit_vertex_parameter")
			set<SPIRExtension>(id, SPIRExtension::SPV_AMD_shader_explicit_vertex_parameter);
		else if (ext == "SPV_AMD_shader_trinary_minmax")
			set<SPIRExtension>(id, SPIRExtension::SPV_AMD_shader_trinary_minmax);
		else if (ext == "SPV_AMD_gcn_shader")
			set<SPIRExtension>(id, SPIRExtension::SPV_AMD_gcn_shader);
		else
			set<SPIRExtension>(id, SPIRExtension::Unsupported);

		// Other SPIR-V extensions which have ExtInstrs are currently not supported.

		break;
	}

	case OpExtInst:
	{
		// The SPIR-V debug information extended instructions might come at global scope.
		if (current_block)
			current_block->ops.push_back(instruction);
		break;
	}

	case OpEntryPoint:
	{
		auto itr =
		    ir.entry_points.insert(make_pair(ops[1], SPIREntryPoint(ops[1], static_cast<ExecutionModel>(ops[0]),
		                                                            extract_string(ir.spirv, instruction.offset + 2))));
		auto &e = itr.first->second;

		// Strings need a nul-terminator and consume whole words.
		uint32_t strlen_words = uint32_t((e.name.size() + 1 + 3) >> 2);
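		// e.g. for the name "main": 4 chars + nul = 5 bytes -> 2 words, so the
		// interface variable IDs start at operand index 2 + 2 = 4 below.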

		for (uint32_t i = strlen_words + 2; i < instruction.length; i++)
			e.interface_variables.push_back(ops[i]);

		// Set the name of the entry point in case OpName is not provided later.
		ir.set_name(ops[1], e.name);

		// If we don't have an entry, make the first one our "default".
		if (!ir.default_entry_point)
			ir.default_entry_point = ops[1];
		break;
	}

	case OpExecutionMode:
	{
		auto &execution = ir.entry_points[ops[0]];
		auto mode = static_cast<ExecutionMode>(ops[1]);
		execution.flags.set(mode);

		switch (mode)
		{
		case ExecutionModeInvocations:
			execution.invocations = ops[2];
			break;

		case ExecutionModeLocalSize:
			execution.workgroup_size.x = ops[2];
			execution.workgroup_size.y = ops[3];
			execution.workgroup_size.z = ops[4];
			break;

		case ExecutionModeOutputVertices:
			execution.output_vertices = ops[2];
			break;

		default:
			break;
		}
		break;
	}

	case OpName:
	{
		uint32_t id = ops[0];
		ir.set_name(id, extract_string(ir.spirv, instruction.offset + 1));
		break;
	}

	case OpMemberName:
	{
		uint32_t id = ops[0];
		uint32_t member = ops[1];
		ir.set_member_name(id, member, extract_string(ir.spirv, instruction.offset + 2));
		break;
	}

	case OpDecorationGroup:
	{
		// Noop, this simply means an ID should be a collector of decorations.
		// The meta array is already a flat array of decorations which will contain the relevant decorations.
		break;
	}

	case OpGroupDecorate:
	{
		uint32_t group_id = ops[0];
		auto &decorations = ir.meta[group_id].decoration;
		auto &flags = decorations.decoration_flags;

		// Copies decorations from one ID to another. Only copy decorations which are set in the group,
		// i.e., we cannot just copy the meta structure directly.
		for (uint32_t i = 1; i < length; i++)
		{
			uint32_t target = ops[i];
			flags.for_each_bit([&](uint32_t bit) {
				auto decoration = static_cast<Decoration>(bit);

				if (decoration_is_string(decoration))
				{
					ir.set_decoration_string(target, decoration, ir.get_decoration_string(group_id, decoration));
				}
				else
				{
					ir.meta[target].decoration_word_offset[decoration] =
					    ir.meta[group_id].decoration_word_offset[decoration];
					ir.set_decoration(target, decoration, ir.get_decoration(group_id, decoration));
				}
			});
		}
		break;
	}

	case OpGroupMemberDecorate:
	{
		uint32_t group_id = ops[0];
		auto &flags = ir.meta[group_id].decoration.decoration_flags;

		// Copies decorations from one ID to another. Only copy decorations which are set in the group,
		// i.e., we cannot just copy the meta structure directly.
		for (uint32_t i = 1; i + 1 < length; i += 2)
		{
			uint32_t target = ops[i + 0];
			uint32_t index = ops[i + 1];
			flags.for_each_bit([&](uint32_t bit) {
				auto decoration = static_cast<Decoration>(bit);

				if (decoration_is_string(decoration))
					ir.set_member_decoration_string(target, index, decoration,
					                                ir.get_decoration_string(group_id, decoration));
				else
					ir.set_member_decoration(target, index, decoration, ir.get_decoration(group_id, decoration));
			});
		}
		break;
	}

	case OpDecorate:
	case OpDecorateId:
	{
		// OpDecorateId technically supports an array of arguments, but the decorations we
		// support all take a single uint argument, so merge decorate and decorate-id here.
		uint32_t id = ops[0];

		auto decoration = static_cast<Decoration>(ops[1]);
		if (length >= 3)
		{
			ir.meta[id].decoration_word_offset[decoration] = uint32_t(&ops[2] - ir.spirv.data());
			ir.set_decoration(id, decoration, ops[2]);
		}
		else
			ir.set_decoration(id, decoration);

		break;
	}

	case OpDecorateStringGOOGLE:
	{
		uint32_t id = ops[0];
		auto decoration = static_cast<Decoration>(ops[1]);
		ir.set_decoration_string(id, decoration, extract_string(ir.spirv, instruction.offset + 2));
		break;
	}

	case OpMemberDecorate:
	{
		uint32_t id = ops[0];
		uint32_t member = ops[1];
		auto decoration = static_cast<Decoration>(ops[2]);
		if (length >= 4)
			ir.set_member_decoration(id, member, decoration, ops[3]);
		else
			ir.set_member_decoration(id, member, decoration);
		break;
	}

	case OpMemberDecorateStringGOOGLE:
	{
		uint32_t id = ops[0];
		uint32_t member = ops[1];
		auto decoration = static_cast<Decoration>(ops[2]);
		ir.set_member_decoration_string(id, member, decoration, extract_string(ir.spirv, instruction.offset + 3));
		break;
	}

	// Build up basic types.
	case OpTypeVoid:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Void;
		break;
	}

	case OpTypeBool:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Boolean;
		type.width = 1;
		break;
	}

	case OpTypeFloat:
	{
		uint32_t id = ops[0];
		uint32_t width = ops[1];
		auto &type = set<SPIRType>(id);
		if (width == 64)
			type.basetype = SPIRType::Double;
		else if (width == 32)
			type.basetype = SPIRType::Float;
		else if (width == 16)
			type.basetype = SPIRType::Half;
		else
			SPIRV_CROSS_THROW("Unrecognized bit-width of floating point type.");
		type.width = width;
		break;
	}

	case OpTypeInt:
	{
		uint32_t id = ops[0];
		uint32_t width = ops[1];
		bool signedness = ops[2] != 0;
		auto &type = set<SPIRType>(id);
		type.basetype = signedness ? to_signed_basetype(width) : to_unsigned_basetype(width);
		type.width = width;
		break;
	}

	// Build composite types by "inheriting".
	// NOTE: The self member is also copied! For pointers and array modifiers this is a good thing
	// since we can refer to decorations on pointee classes which is needed for UBO/SSBO, I/O blocks in geometry/tess etc.
	case OpTypeVector:
	{
		uint32_t id = ops[0];
		uint32_t vecsize = ops[2];

		auto &base = get<SPIRType>(ops[1]);
		auto &vecbase = set<SPIRType>(id);

		vecbase = base;
		vecbase.vecsize = vecsize;
		vecbase.self = id;
		vecbase.parent_type = ops[1];
		break;
	}

	case OpTypeMatrix:
	{
		uint32_t id = ops[0];
		uint32_t colcount = ops[2];

		auto &base = get<SPIRType>(ops[1]);
		auto &matrixbase = set<SPIRType>(id);

		matrixbase = base;
		matrixbase.columns = colcount;
		matrixbase.self = id;
		matrixbase.parent_type = ops[1];
		break;
	}

	case OpTypeArray:
	{
		uint32_t id = ops[0];
		auto &arraybase = set<SPIRType>(id);

		uint32_t tid = ops[1];
		auto &base = get<SPIRType>(tid);

		arraybase = base;
		arraybase.parent_type = tid;

		uint32_t cid = ops[2];
		ir.mark_used_as_array_length(cid);
		auto *c = maybe_get<SPIRConstant>(cid);
		bool literal = c && !c->specialization;

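		// For a plain constant length, store the resolved element count directly;
		// otherwise keep the constant ID so backends can emit the spec constant by name.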
		arraybase.array_size_literal.push_back(literal);
		arraybase.array.push_back(literal ? c->scalar() : cid);
		// Do NOT set arraybase.self!
		break;
	}

	case OpTypeRuntimeArray:
	{
		uint32_t id = ops[0];

		auto &base = get<SPIRType>(ops[1]);
		auto &arraybase = set<SPIRType>(id);

		arraybase = base;
		arraybase.array.push_back(0);
		arraybase.array_size_literal.push_back(true);
		arraybase.parent_type = ops[1];
		// Do NOT set arraybase.self!
		break;
	}

	case OpTypeImage:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Image;
		type.image.type = ops[1];
		type.image.dim = static_cast<Dim>(ops[2]);
		type.image.depth = ops[3] == 1;
		type.image.arrayed = ops[4] != 0;
		type.image.ms = ops[5] != 0;
		type.image.sampled = ops[6];
		type.image.format = static_cast<ImageFormat>(ops[7]);
		type.image.access = (length >= 9) ? static_cast<AccessQualifier>(ops[8]) : AccessQualifierMax;
		break;
	}

	case OpTypeSampledImage:
	{
		uint32_t id = ops[0];
		uint32_t imagetype = ops[1];
		auto &type = set<SPIRType>(id);
		type = get<SPIRType>(imagetype);
		type.basetype = SPIRType::SampledImage;
		type.self = id;
		break;
	}

	case OpTypeSampler:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Sampler;
		break;
	}

	case OpTypePointer:
	{
		uint32_t id = ops[0];

		auto &base = get<SPIRType>(ops[2]);
		auto &ptrbase = set<SPIRType>(id);

		ptrbase = base;
		ptrbase.pointer = true;
		ptrbase.pointer_depth++;
		ptrbase.storage = static_cast<StorageClass>(ops[1]);

		if (ptrbase.storage == StorageClassAtomicCounter)
			ptrbase.basetype = SPIRType::AtomicCounter;

		ptrbase.parent_type = ops[2];

		// Do NOT set ptrbase.self!
		break;
	}

	case OpTypeForwardPointer:
	{
		uint32_t id = ops[0];
		auto &ptrbase = set<SPIRType>(id);
		ptrbase.pointer = true;
		ptrbase.pointer_depth++;
		ptrbase.storage = static_cast<StorageClass>(ops[1]);

		if (ptrbase.storage == StorageClassAtomicCounter)
			ptrbase.basetype = SPIRType::AtomicCounter;

		break;
	}

	case OpTypeStruct:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::Struct;
		for (uint32_t i = 1; i < length; i++)
			type.member_types.push_back(ops[i]);

		// Check if we have seen this struct type before, with just different
		// decorations.
		//
		// Add workaround for issue #17 as well by looking at OpName for the struct
		// types, which we shouldn't normally do.
		// We should not normally have to consider type aliases like this to begin with
		// however ... glslang issues #304, #307 cover this.

		// For stripped names, never consider struct type aliasing.
		// We risk declaring the same struct multiple times, but type-punning is not allowed
		// so this is safe.
		bool consider_aliasing = !ir.get_name(type.self).empty();
		if (consider_aliasing)
		{
			for (auto &other : global_struct_cache)
			{
				if (ir.get_name(type.self) == ir.get_name(other) &&
				    types_are_logically_equivalent(type, get<SPIRType>(other)))
				{
					type.type_alias = other;
					break;
				}
			}

			if (type.type_alias == TypeID(0))
				global_struct_cache.push_back(id);
		}
		break;
	}

	case OpTypeFunction:
	{
		uint32_t id = ops[0];
		uint32_t ret = ops[1];

		auto &func = set<SPIRFunctionPrototype>(id, ret);
		for (uint32_t i = 2; i < length; i++)
			func.parameter_types.push_back(ops[i]);
		break;
	}

	case OpTypeAccelerationStructureKHR:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::AccelerationStructure;
		break;
	}

	case OpTypeRayQueryProvisionalKHR:
	{
		uint32_t id = ops[0];
		auto &type = set<SPIRType>(id);
		type.basetype = SPIRType::RayQuery;
		break;
	}

	// Variable declaration
	// All variables are essentially pointers with a storage qualifier.
	case OpVariable:
	{
		uint32_t type = ops[0];
		uint32_t id = ops[1];
		auto storage = static_cast<StorageClass>(ops[2]);
		uint32_t initializer = length == 4 ? ops[3] : 0;

		if (storage == StorageClassFunction)
		{
			if (!current_function)
				SPIRV_CROSS_THROW("No function currently in scope");
			current_function->add_local_variable(id);
		}

		set<SPIRVariable>(id, type, storage, initializer);
		break;
	}

	// OpPhi
	// OpPhi is a fairly magical opcode.
	// It selects temporary variables based on which parent block we *came from*.
	// In high-level languages we can "de-SSA" by creating a function-local variable
	// and flushing the temporaries out to it, emulating SSA Phi.
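	// Rough sketch of the idea: "%x = OpPhi %int %a %blockA %b %blockB" becomes a
	// local variable; each predecessor block stores its value (%a or %b) into that
	// variable just before branching here, and uses of %x simply read it back.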
	case OpPhi:
	{
		if (!current_function)
			SPIRV_CROSS_THROW("No function currently in scope");
		if (!current_block)
			SPIRV_CROSS_THROW("No block currently in scope");

		uint32_t result_type = ops[0];
		uint32_t id = ops[1];

		// Instead of a temporary, create a new function-wide temporary with this ID.
		auto &var = set<SPIRVariable>(id, result_type, spv::StorageClassFunction);
		var.phi_variable = true;

		current_function->add_local_variable(id);

		for (uint32_t i = 2; i + 2 <= length; i += 2)
			current_block->phi_variables.push_back({ ops[i], ops[i + 1], id });
		break;
	}

	// Constants
	case OpSpecConstant:
	case OpConstant:
	{
		uint32_t id = ops[1];
		auto &type = get<SPIRType>(ops[0]);

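		// 64-bit literals span two operand words, low-order word first,
		// so e.g. a double constant arrives as (low32, high32).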
		if (type.width > 32)
			set<SPIRConstant>(id, ops[0], ops[2] | (uint64_t(ops[3]) << 32), op == OpSpecConstant);
		else
			set<SPIRConstant>(id, ops[0], ops[2], op == OpSpecConstant);
		break;
	}

	case OpSpecConstantFalse:
	case OpConstantFalse:
	{
		uint32_t id = ops[1];
		set<SPIRConstant>(id, ops[0], uint32_t(0), op == OpSpecConstantFalse);
		break;
	}

	case OpSpecConstantTrue:
	case OpConstantTrue:
	{
		uint32_t id = ops[1];
		set<SPIRConstant>(id, ops[0], uint32_t(1), op == OpSpecConstantTrue);
		break;
	}

	case OpConstantNull:
	{
		uint32_t id = ops[1];
		uint32_t type = ops[0];
		ir.make_constant_null(id, type, true);
		break;
	}

	case OpSpecConstantComposite:
	case OpConstantComposite:
	{
		uint32_t id = ops[1];
		uint32_t type = ops[0];

		auto &ctype = get<SPIRType>(type);

		// We can have constants which are structs and arrays.
		// In this case, our SPIRConstant will be a list of other SPIRConstant ids which we
		// can refer to.
		if (ctype.basetype == SPIRType::Struct || !ctype.array.empty())
		{
			set<SPIRConstant>(id, type, ops + 2, length - 2, op == OpSpecConstantComposite);
		}
		else
		{
			uint32_t elements = length - 2;
			if (elements > 4)
				SPIRV_CROSS_THROW("OpConstantComposite only supports 1, 2, 3 and 4 elements.");

			SPIRConstant remapped_constant_ops[4];
			const SPIRConstant *c[4];
			for (uint32_t i = 0; i < elements; i++)
			{
				// Specialization constant operations can also be part of this.
				// We do not know their value, so any attempt to query SPIRConstant later
				// will fail. We can only propagate the ID of the expression and use to_expression on it.
				auto *constant_op = maybe_get<SPIRConstantOp>(ops[2 + i]);
				auto *undef_op = maybe_get<SPIRUndef>(ops[2 + i]);
				if (constant_op)
				{
					if (op == OpConstantComposite)
						SPIRV_CROSS_THROW("Specialization constant operation used in OpConstantComposite.");

					remapped_constant_ops[i].make_null(get<SPIRType>(constant_op->basetype));
					remapped_constant_ops[i].self = constant_op->self;
					remapped_constant_ops[i].constant_type = constant_op->basetype;
					remapped_constant_ops[i].specialization = true;
					c[i] = &remapped_constant_ops[i];
				}
				else if (undef_op)
				{
					// Undefined, just pick 0.
					remapped_constant_ops[i].make_null(get<SPIRType>(undef_op->basetype));
					remapped_constant_ops[i].constant_type = undef_op->basetype;
					c[i] = &remapped_constant_ops[i];
				}
				else
					c[i] = &get<SPIRConstant>(ops[2 + i]);
			}
			set<SPIRConstant>(id, type, c, elements, op == OpSpecConstantComposite);
		}
		break;
	}

	// Functions
	case OpFunction:
	{
		uint32_t res = ops[0];
		uint32_t id = ops[1];
		// Control
		uint32_t type = ops[3];

		if (current_function)
			SPIRV_CROSS_THROW("Must end a function before starting a new one!");

		current_function = &set<SPIRFunction>(id, res, type);
		break;
	}

	case OpFunctionParameter:
	{
		uint32_t type = ops[0];
		uint32_t id = ops[1];

		if (!current_function)
			SPIRV_CROSS_THROW("Must be in a function!");

		current_function->add_parameter(type, id);
		set<SPIRVariable>(id, type, StorageClassFunction);
		break;
	}

	case OpFunctionEnd:
	{
		if (current_block)
		{
			// Very specific error message, but seems to come up quite often.
			SPIRV_CROSS_THROW(
			    "Cannot end a function before ending the current block.\n"
			    "Likely cause: If this SPIR-V was created from glslang HLSL, make sure the entry point is valid.");
		}
		current_function = nullptr;
		break;
	}

	// Blocks
	case OpLabel:
	{
		// OpLabel always starts a block.
		if (!current_function)
			SPIRV_CROSS_THROW("Blocks cannot exist outside functions!");

		uint32_t id = ops[0];

		current_function->blocks.push_back(id);
		if (!current_function->entry_block)
			current_function->entry_block = id;

		if (current_block)
			SPIRV_CROSS_THROW("Cannot start a block before ending the current block.");

		current_block = &set<SPIRBlock>(id);
		break;
	}

	// Branch instructions end blocks.
	case OpBranch:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");

		uint32_t target = ops[0];
		current_block->terminator = SPIRBlock::Direct;
		current_block->next_block = target;
		current_block = nullptr;
		break;
	}

	case OpBranchConditional:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");

		current_block->condition = ops[0];
		current_block->true_block = ops[1];
		current_block->false_block = ops[2];

		current_block->terminator = SPIRBlock::Select;
		current_block = nullptr;
		break;
	}

	case OpSwitch:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");

		current_block->terminator = SPIRBlock::MultiSelect;

		current_block->condition = ops[0];
		current_block->default_block = ops[1];

		for (uint32_t i = 2; i + 2 <= length; i += 2)
			current_block->cases.push_back({ ops[i], ops[i + 1] });

		// If we jump to next block, make it break instead since we're inside a switch case block at that point.
		ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT;

		current_block = nullptr;
		break;
	}

	case OpKill:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Kill;
		current_block = nullptr;
		break;
	}

	case OpReturn:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Return;
		current_block = nullptr;
		break;
	}

	case OpReturnValue:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Return;
		current_block->return_value = ops[0];
		current_block = nullptr;
		break;
	}

	case OpUnreachable:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to end a non-existing block.");
		current_block->terminator = SPIRBlock::Unreachable;
		current_block = nullptr;
		break;
	}

	case OpSelectionMerge:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to modify a non-existing block.");

		current_block->next_block = ops[0];
		current_block->merge = SPIRBlock::MergeSelection;
		ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_SELECTION_MERGE_BIT;

		if (length >= 2)
		{
			if (ops[1] & SelectionControlFlattenMask)
				current_block->hint = SPIRBlock::HintFlatten;
			else if (ops[1] & SelectionControlDontFlattenMask)
				current_block->hint = SPIRBlock::HintDontFlatten;
		}
		break;
	}

	case OpLoopMerge:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Trying to modify a non-existing block.");

		current_block->merge_block = ops[0];
		current_block->continue_block = ops[1];
		current_block->merge = SPIRBlock::MergeLoop;

		ir.block_meta[current_block->self] |= ParsedIR::BLOCK_META_LOOP_HEADER_BIT;
		ir.block_meta[current_block->merge_block] |= ParsedIR::BLOCK_META_LOOP_MERGE_BIT;

		ir.continue_block_to_loop_header[current_block->continue_block] = BlockID(current_block->self);

		// Don't add loop headers to continue blocks,
		// which would make it impossible to branch into the loop header since
		// they are treated as continues.
		if (current_block->continue_block != BlockID(current_block->self))
			ir.block_meta[current_block->continue_block] |= ParsedIR::BLOCK_META_CONTINUE_BIT;

		if (length >= 3)
		{
			if (ops[2] & LoopControlUnrollMask)
				current_block->hint = SPIRBlock::HintUnroll;
			else if (ops[2] & LoopControlDontUnrollMask)
				current_block->hint = SPIRBlock::HintDontUnroll;
		}
		break;
	}

	case OpSpecConstantOp:
	{
		if (length < 3)
			SPIRV_CROSS_THROW("OpSpecConstantOp not enough arguments.");

		uint32_t result_type = ops[0];
		uint32_t id = ops[1];
		auto spec_op = static_cast<Op>(ops[2]);

		set<SPIRConstantOp>(id, result_type, spec_op, ops + 3, length - 3);
		break;
	}

	case OpLine:
	{
		// OpLine might come at global scope, but we don't care about those,
		// since global declarations are not emitted in any meaningful order anyway.
		// Ignore all OpLine directives which live outside a function.
		if (current_block)
			current_block->ops.push_back(instruction);

		// Line directives may arrive before first OpLabel.
		// Treat this as the line of the function declaration,
		// so warnings for arguments can propagate properly.
		if (current_function)
		{
			// Store the first one we find and emit it before creating the function prototype.
			if (current_function->entry_line.file_id == 0)
			{
				current_function->entry_line.file_id = ops[0];
				current_function->entry_line.line_literal = ops[1];
			}
		}
		break;
	}

	case OpNoLine:
	{
		// OpNoLine might come at global scope.
		if (current_block)
			current_block->ops.push_back(instruction);
		break;
	}

	// Actual opcodes.
	default:
	{
		if (!current_block)
			SPIRV_CROSS_THROW("Currently no block to insert opcode.");

		current_block->ops.push_back(instruction);
		break;
	}
	}
}

bool Parser::types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const
{
	if (a.basetype != b.basetype)
		return false;
	if (a.width != b.width)
		return false;
	if (a.vecsize != b.vecsize)
		return false;
	if (a.columns != b.columns)
		return false;
	if (a.array.size() != b.array.size())
		return false;

	size_t array_count = a.array.size();
	if (array_count && memcmp(a.array.data(), b.array.data(), array_count * sizeof(uint32_t)) != 0)
		return false;

	if (a.basetype == SPIRType::Image || a.basetype == SPIRType::SampledImage)
	{
		if (memcmp(&a.image, &b.image, sizeof(SPIRType::Image)) != 0)
			return false;
	}

	if (a.member_types.size() != b.member_types.size())
		return false;

	size_t member_types = a.member_types.size();
	for (size_t i = 0; i < member_types; i++)
	{
		if (!types_are_logically_equivalent(get<SPIRType>(a.member_types[i]), get<SPIRType>(b.member_types[i])))
			return false;
	}

	return true;
}

bool Parser::variable_storage_is_aliased(const SPIRVariable &v) const
{
	auto &type = get<SPIRType>(v.basetype);

	auto *type_meta = ir.find_meta(type.self);

	bool ssbo = v.storage == StorageClassStorageBuffer ||
	            (type_meta && type_meta->decoration.decoration_flags.get(DecorationBufferBlock));
	bool image = type.basetype == SPIRType::Image;
	bool counter = type.basetype == SPIRType::AtomicCounter;

	bool is_restrict;
	if (ssbo)
		is_restrict = ir.get_buffer_block_flags(v).get(DecorationRestrict);
	else
		is_restrict = ir.has_decoration(v.self, DecorationRestrict);

	return !is_restrict && (ssbo || image || counter);
}
} // namespace SPIRV_CROSS_NAMESPACE