// XGL tests
//
// Copyright (C) 2014 LunarG, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

#include <iostream>
#include <string.h> // memset(), memcmp()
#include "xgltestbinding.h"

namespace {

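// Creates an XGL object via create_func and, on success, hands the new
// handle to the wrapper's base class.  obj_type and base_type are typedefs
// each wrapper class is expected to provide (see xgltestbinding.h).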
#define DERIVED_OBJECT_INIT(create_func, ...) \
    do { \
        obj_type obj; \
        if (EXPECT(create_func(__VA_ARGS__, &obj) == XGL_SUCCESS)) \
            base_type::init(obj); \
    } while (0)

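// EXPECT() evaluates expr and, on failure, reports the stringified
// expression through expect_failure() below; it yields the expression's
// truth value so callers can branch on it.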
#define STRINGIFY(x) #x
#define EXPECT(expr) ((expr) ? true : expect_failure(STRINGIFY(expr), __FILE__, __LINE__, __FUNCTION__))

xgl_testing::ErrorCallback error_callback;

bool expect_failure(const char *expr, const char *file, unsigned int line, const char *function)
{
    if (error_callback) {
        error_callback(expr, file, line, function);
    } else {
        std::cerr << file << ":" << line << ": " << function <<
            ": Expectation `" << expr << "' failed.\n";
    }

    return false;
}

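// Collects the raw XGL handles from a vector of wrapper-object pointers.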
template<class T, class S>
std::vector<T> make_objects(const std::vector<S> &v)
{
    std::vector<T> objs;
    objs.reserve(v.size());
    for (typename std::vector<S>::const_iterator it = v.begin(); it != v.end(); it++)
        objs.push_back((*it)->obj());
    return objs;
}

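// The two overloads below use the usual two-call query idiom: the first
// call asks the driver for the data size, the second fetches the data.  Any
// size inconsistency clears the result, and min_elems pads it so callers
// may index element [0] unconditionally.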
template<typename T>
std::vector<T> get_info(XGL_PHYSICAL_GPU gpu, XGL_PHYSICAL_GPU_INFO_TYPE type, size_t min_elems)
{
    std::vector<T> info;
    size_t size = sizeof(T);
    if (EXPECT(xglGetGpuInfo(gpu, type, &size, NULL) == XGL_SUCCESS && size % sizeof(T) == 0)) {
        info.resize(size / sizeof(T));
        if (!EXPECT(xglGetGpuInfo(gpu, type, &size, &info[0]) == XGL_SUCCESS && size == info.size() * sizeof(T)))
            info.clear();
    }

    if (info.size() < min_elems)
        info.resize(min_elems);

    return info;
}

template<typename T>
std::vector<T> get_info(XGL_BASE_OBJECT obj, XGL_OBJECT_INFO_TYPE type, size_t min_elems)
{
    std::vector<T> info;
    size_t size = sizeof(T);
    if (EXPECT(xglGetObjectInfo(obj, type, &size, NULL) == XGL_SUCCESS && size % sizeof(T) == 0)) {
        info.resize(size / sizeof(T));
        if (!EXPECT(xglGetObjectInfo(obj, type, &size, &info[0]) == XGL_SUCCESS && size == info.size() * sizeof(T)))
            info.clear();
    }

    if (info.size() < min_elems)
        info.resize(min_elems);

    return info;
}

} // namespace

namespace xgl_testing {

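// Routes expectation failures to the given callback instead of std::cerr.
// A test harness would typically install one hook at start-up; a minimal
// GoogleTest-style sketch (the handler name is illustrative):
//
//   static void on_expect_failure(const char *expr, const char *file,
//                                 unsigned int line, const char *function)
//   {
//       ADD_FAILURE_AT(file, line) << function << ": `" << expr << "' failed";
//   }
//
//   xgl_testing::set_error_callback(on_expect_failure);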
void set_error_callback(ErrorCallback callback)
{
    error_callback = callback;
}

XGL_PHYSICAL_GPU_PROPERTIES PhysicalGpu::properties() const
{
    return get_info<XGL_PHYSICAL_GPU_PROPERTIES>(gpu_, XGL_INFO_TYPE_PHYSICAL_GPU_PROPERTIES, 1)[0];
}

XGL_PHYSICAL_GPU_PERFORMANCE PhysicalGpu::performance() const
{
    return get_info<XGL_PHYSICAL_GPU_PERFORMANCE>(gpu_, XGL_INFO_TYPE_PHYSICAL_GPU_PERFORMANCE, 1)[0];
}

std::vector<XGL_PHYSICAL_GPU_QUEUE_PROPERTIES> PhysicalGpu::queue_properties() const
{
    return get_info<XGL_PHYSICAL_GPU_QUEUE_PROPERTIES>(gpu_, XGL_INFO_TYPE_PHYSICAL_GPU_QUEUE_PROPERTIES, 0);
}

XGL_PHYSICAL_GPU_MEMORY_PROPERTIES PhysicalGpu::memory_properties() const
{
    return get_info<XGL_PHYSICAL_GPU_MEMORY_PROPERTIES>(gpu_, XGL_INFO_TYPE_PHYSICAL_GPU_MEMORY_PROPERTIES, 1)[0];
}

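// Enumerates the layers supported by the GPU.  The caller-provided buf is
// carved into max_layer_count fixed-size slots and the returned pointers
// reference those slots, so buf must outlive the returned vector.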
std::vector<const char *> PhysicalGpu::layers(std::vector<char> &buf) const
{
    const size_t max_layer_count = 16;
    const size_t max_string_size = 256;

    buf.resize(max_layer_count * max_string_size);

    std::vector<const char *> layers;
    layers.reserve(max_layer_count);
    for (size_t i = 0; i < max_layer_count; i++)
        layers.push_back(&buf[0] + max_string_size * i);

    char * const *out = const_cast<char * const *>(&layers[0]);
    size_t count;
    if (!EXPECT(xglEnumerateLayers(gpu_, max_layer_count, max_string_size, &count, out, NULL) == XGL_SUCCESS))
        count = 0;
    layers.resize(count);

    return layers;
}

std::vector<const char *> PhysicalGpu::extensions() const
{
    static const char *known_exts[] = {
        "XGL_WSI_X11",
    };

    std::vector<const char *> exts;
    for (size_t i = 0; i < sizeof(known_exts) / sizeof(known_exts[0]); i++) {
        XGL_RESULT err = xglGetExtensionSupport(gpu_, known_exts[i]);
        if (err == XGL_SUCCESS)
            exts.push_back(known_exts[i]);
    }

    return exts;
}

XGL_GPU_COMPATIBILITY_INFO PhysicalGpu::compatibility(const PhysicalGpu &other) const
{
    XGL_GPU_COMPATIBILITY_INFO data;
    if (!EXPECT(xglGetMultiGpuCompatibility(gpu_, other.gpu_, &data) == XGL_SUCCESS))
        memset(&data, 0, sizeof(data));

    return data;
}

void BaseObject::init(XGL_BASE_OBJECT obj, bool own)
{
    EXPECT(!initialized());
    reinit(obj, own);
}

void BaseObject::reinit(XGL_BASE_OBJECT obj, bool own)
{
    obj_ = obj;
    own_obj_ = own;
}

uint32_t BaseObject::memory_allocation_count() const
{
    return memory_requirements().size();
}

std::vector<XGL_MEMORY_REQUIREMENTS> BaseObject::memory_requirements() const
{
    std::vector<XGL_MEMORY_REQUIREMENTS> info =
        get_info<XGL_MEMORY_REQUIREMENTS>(obj_, XGL_INFO_TYPE_MEMORY_REQUIREMENTS, 0);
    if (info.size() == 1 && !info[0].size)
        info.clear();

    return info;
}

void Object::init(XGL_OBJECT obj, bool own)
{
    BaseObject::init(obj, own);
    mem_alloc_count_ = memory_allocation_count();
}

void Object::reinit(XGL_OBJECT obj, bool own)
{
    cleanup();
    BaseObject::reinit(obj, own);
    mem_alloc_count_ = memory_allocation_count();
}

void Object::cleanup()
{
    if (!initialized())
        return;

    unbind_memory();

    if (internal_mems_) {
        delete[] internal_mems_;
        internal_mems_ = NULL;
        primary_mem_ = NULL;
    }

    mem_alloc_count_ = 0;

    if (own())
        EXPECT(xglDestroyObject(obj()) == XGL_SUCCESS);
}

void Object::bind_memory(uint32_t alloc_idx, const GpuMemory &mem, XGL_GPU_SIZE mem_offset)
{
    EXPECT(!alloc_idx && xglBindObjectMemory(obj(), mem.obj(), mem_offset) == XGL_SUCCESS);
}

void Object::bind_memory(uint32_t alloc_idx, XGL_GPU_SIZE offset, XGL_GPU_SIZE size,
                         const GpuMemory &mem, XGL_GPU_SIZE mem_offset)
{
    EXPECT(!alloc_idx && xglBindObjectMemoryRange(obj(), offset, size, mem.obj(), mem_offset) == XGL_SUCCESS);
}

void Object::unbind_memory(uint32_t alloc_idx)
{
    EXPECT(!alloc_idx && xglBindObjectMemory(obj(), XGL_NULL_HANDLE, 0) == XGL_SUCCESS);
}

void Object::unbind_memory()
{
    for (uint32_t i = 0; i < mem_alloc_count_; i++)
        unbind_memory(i);
}

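// Allocates backing memory for each of the object's required allocations
// and binds it.  Candidate heaps are reordered so CPU-visible ones are
// tried first, with the non-visible heaps appended as a fallback.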
void Object::alloc_memory(const Device &dev, bool for_linear_img)
{
    if (!EXPECT(!internal_mems_) || !mem_alloc_count_)
        return;

    internal_mems_ = new GpuMemory[mem_alloc_count_];

    const std::vector<XGL_MEMORY_REQUIREMENTS> mem_reqs = memory_requirements();
    for (size_t i = 0; i < mem_reqs.size(); i++) {
        XGL_MEMORY_ALLOC_INFO info = GpuMemory::alloc_info(mem_reqs[i]);

        // prefer CPU visible heaps
        std::vector<uint32_t> non_visible_heaps;
        info.heapCount = 0;
        for (uint32_t j = 0; j < mem_reqs[i].heapCount; j++) {
            const uint32_t heap = mem_reqs[i].heaps[j];
            const XGL_MEMORY_HEAP_PROPERTIES &props = dev.heap_properties()[heap];

            if (props.flags & XGL_MEMORY_HEAP_CPU_VISIBLE_BIT)
                info.heaps[info.heapCount++] = heap;
            else
                non_visible_heaps.push_back(heap);
        }
        for (std::vector<uint32_t>::const_iterator it = non_visible_heaps.begin(); it != non_visible_heaps.end(); it++)
            info.heaps[info.heapCount++] = *it;

        primary_mem_ = &internal_mems_[i];

        internal_mems_[i].init(dev, info);
        bind_memory(i, internal_mems_[i], 0);
    }
}

void Object::alloc_memory(const std::vector<XGL_GPU_MEMORY> &mems)
{
    if (!EXPECT(!internal_mems_) || !mem_alloc_count_)
        return;

    internal_mems_ = new GpuMemory[mem_alloc_count_];

    const std::vector<XGL_MEMORY_REQUIREMENTS> mem_reqs = memory_requirements();
    if (!EXPECT(mem_reqs.size() == mems.size()))
        return;

    for (size_t i = 0; i < mem_reqs.size(); i++) {
        primary_mem_ = &internal_mems_[i];

        internal_mems_[i].init(mems[i]);
        bind_memory(i, internal_mems_[i], 0);
    }
}

std::vector<XGL_GPU_MEMORY> Object::memories() const
{
    std::vector<XGL_GPU_MEMORY> mems;
    if (internal_mems_) {
        mems.reserve(mem_alloc_count_);
        for (uint32_t i = 0; i < mem_alloc_count_; i++)
            mems.push_back(internal_mems_[i].obj());
    }

    return mems;
}

Device::~Device()
{
    if (!initialized())
        return;

    for (int i = 0; i < QUEUE_COUNT; i++) {
        for (std::vector<Queue *>::iterator it = queues_[i].begin(); it != queues_[i].end(); it++)
            delete *it;
        queues_[i].clear();
    }

    EXPECT(xglDestroyDevice(obj()) == XGL_SUCCESS);
}

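// Creates the device with every queue the GPU exposes, all known
// extensions, validation enabled, and (optionally) every layer reported by
// xglEnumerateLayers.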
void Device::init(bool enable_layers)
{
    // request all queues
    const std::vector<XGL_PHYSICAL_GPU_QUEUE_PROPERTIES> queue_props = gpu_.queue_properties();
    std::vector<XGL_DEVICE_QUEUE_CREATE_INFO> queue_info;
    queue_info.reserve(queue_props.size());
    for (size_t i = 0; i < queue_props.size(); i++) {
        XGL_DEVICE_QUEUE_CREATE_INFO qi = {};
        qi.queueNodeIndex = i;
        qi.queueCount = queue_props[i].queueCount;
        queue_info.push_back(qi);
    }

    XGL_LAYER_CREATE_INFO layer_info = {};
    layer_info.sType = XGL_STRUCTURE_TYPE_LAYER_CREATE_INFO;

    std::vector<const char *> layers;
    std::vector<char> layer_buf;
    // request all layers
    if (enable_layers) {
        layers = gpu_.layers(layer_buf);
        layer_info.layerCount = layers.size();
        layer_info.ppActiveLayerNames = &layers[0];
    }

    const std::vector<const char *> exts = gpu_.extensions();

    XGL_DEVICE_CREATE_INFO dev_info = {};
    dev_info.sType = XGL_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    dev_info.pNext = (enable_layers) ? static_cast<void *>(&layer_info) : NULL;
    dev_info.queueRecordCount = queue_info.size();
    dev_info.pRequestedQueues = &queue_info[0];
    dev_info.extensionCount = exts.size();
    dev_info.ppEnabledExtensionNames = &exts[0];
    dev_info.maxValidationLevel = XGL_VALIDATION_LEVEL_END_RANGE;
    dev_info.flags = XGL_DEVICE_CREATE_VALIDATION_BIT;

    init(dev_info);
}

void Device::init(const XGL_DEVICE_CREATE_INFO &info)
{
    DERIVED_OBJECT_INIT(xglCreateDevice, gpu_.obj(), &info);

    init_queues();
    init_heap_props();
    init_formats();
}

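// For each queue type, device queues are fetched by increasing index until
// xglGetDeviceQueue fails; a usable device is expected to expose at least
// one graphics or compute queue.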
void Device::init_queues()
{
    const struct {
        QueueIndex index;
        XGL_QUEUE_TYPE type;
    } queue_mapping[] = {
        { GRAPHICS, XGL_QUEUE_TYPE_GRAPHICS },
        { COMPUTE, XGL_QUEUE_TYPE_COMPUTE },
        { DMA, XGL_QUEUE_TYPE_DMA },
    };

    for (int i = 0; i < QUEUE_COUNT; i++) {
        uint32_t idx = 0;

        while (true) {
            XGL_QUEUE queue;
            XGL_RESULT err = xglGetDeviceQueue(obj(), queue_mapping[i].type, idx++, &queue);
            if (err != XGL_SUCCESS)
                break;
            queues_[queue_mapping[i].index].push_back(new Queue(queue));
        }
    }

    EXPECT(!queues_[GRAPHICS].empty() || !queues_[COMPUTE].empty());
}

void Device::init_heap_props()
{
    uint32_t count;
    if (!EXPECT(xglGetMemoryHeapCount(obj(), &count) == XGL_SUCCESS && count))
        return;
    if (count > XGL_MAX_MEMORY_HEAPS)
        count = XGL_MAX_MEMORY_HEAPS;

    heap_props_.reserve(count);
    for (uint32_t i = 0; i < count; i++) {
        const XGL_MEMORY_HEAP_INFO_TYPE type = XGL_INFO_TYPE_MEMORY_HEAP_PROPERTIES;
        XGL_MEMORY_HEAP_PROPERTIES props;
        XGL_SIZE size = sizeof(props);
        if (EXPECT(xglGetMemoryHeapInfo(obj(), i, type, &size, &props) == XGL_SUCCESS && size == sizeof(props)))
            heap_props_.push_back(props);
    }
}

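// Walks every channel/numeric format combination and records which ones the
// device supports, once per supported tiling mode, for later lookup by
// Image::init_info().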
void Device::init_formats()
{
    for (int ch = XGL_CH_FMT_UNDEFINED; ch <= XGL_MAX_CH_FMT; ch++) {
        for (int num = XGL_NUM_FMT_UNDEFINED; num <= XGL_MAX_NUM_FMT; num++) {
            const XGL_FORMAT fmt = { static_cast<XGL_CHANNEL_FORMAT>(ch),
                                     static_cast<XGL_NUM_FORMAT>(num) };
            const XGL_FORMAT_PROPERTIES props = format_properties(fmt);

            if (props.linearTilingFeatures) {
                const Format tmp = { fmt, XGL_LINEAR_TILING, props.linearTilingFeatures };
                formats_.push_back(tmp);
            }

            if (props.optimalTilingFeatures) {
                const Format tmp = { fmt, XGL_OPTIMAL_TILING, props.optimalTilingFeatures };
                formats_.push_back(tmp);
            }
        }
    }

    EXPECT(!formats_.empty());
}

XGL_FORMAT_PROPERTIES Device::format_properties(XGL_FORMAT format)
{
    const XGL_FORMAT_INFO_TYPE type = XGL_INFO_TYPE_FORMAT_PROPERTIES;
    XGL_FORMAT_PROPERTIES data;
    size_t size = sizeof(data);
    if (!EXPECT(xglGetFormatInfo(obj(), format, type, &size, &data) == XGL_SUCCESS && size == sizeof(data)))
        memset(&data, 0, sizeof(data));

    return data;
}

void Device::wait()
{
    EXPECT(xglDeviceWaitIdle(obj()) == XGL_SUCCESS);
}

XGL_RESULT Device::wait(const std::vector<const Fence *> &fences, bool wait_all, uint64_t timeout)
{
    const std::vector<XGL_FENCE> fence_objs = make_objects<XGL_FENCE>(fences);
    XGL_RESULT err = xglWaitForFences(obj(), fence_objs.size(), &fence_objs[0], wait_all, timeout);
    EXPECT(err == XGL_SUCCESS || err == XGL_TIMEOUT);

    return err;
}

void Queue::submit(const std::vector<const CmdBuffer *> &cmds, const std::vector<XGL_MEMORY_REF> &mem_refs, Fence &fence)
{
    const std::vector<XGL_CMD_BUFFER> cmd_objs = make_objects<XGL_CMD_BUFFER>(cmds);
    EXPECT(xglQueueSubmit(obj(), cmd_objs.size(), &cmd_objs[0], mem_refs.size(), &mem_refs[0], fence.obj()) == XGL_SUCCESS);
}

void Queue::submit(const CmdBuffer &cmd, const std::vector<XGL_MEMORY_REF> &mem_refs, Fence &fence)
{
    submit(std::vector<const CmdBuffer*>(1, &cmd), mem_refs, fence);
}

void Queue::submit(const CmdBuffer &cmd, const std::vector<XGL_MEMORY_REF> &mem_refs)
{
    Fence fence;
    submit(cmd, mem_refs, fence);
}

void Queue::set_global_mem_references(const std::vector<XGL_MEMORY_REF> &mem_refs)
{
    EXPECT(xglQueueSetGlobalMemReferences(obj(), mem_refs.size(), &mem_refs[0]) == XGL_SUCCESS);
}

void Queue::wait()
{
    EXPECT(xglQueueWaitIdle(obj()) == XGL_SUCCESS);
}

void Queue::signal_semaphore(QueueSemaphore &sem)
{
    EXPECT(xglSignalQueueSemaphore(obj(), sem.obj()) == XGL_SUCCESS);
}

void Queue::wait_semaphore(QueueSemaphore &sem)
{
    EXPECT(xglWaitQueueSemaphore(obj(), sem.obj()) == XGL_SUCCESS);
}

GpuMemory::~GpuMemory()
{
    if (initialized() && own())
        EXPECT(xglFreeMemory(obj()) == XGL_SUCCESS);
}

void GpuMemory::init(const Device &dev, const XGL_MEMORY_ALLOC_INFO &info)
{
    DERIVED_OBJECT_INIT(xglAllocMemory, dev.obj(), &info);
}

void GpuMemory::init(const Device &dev, size_t size, const void *data)
{
    DERIVED_OBJECT_INIT(xglPinSystemMemory, dev.obj(), data, size);
}

void GpuMemory::init(const Device &dev, const XGL_MEMORY_OPEN_INFO &info)
{
    DERIVED_OBJECT_INIT(xglOpenSharedMemory, dev.obj(), &info);
}

void GpuMemory::init(const Device &dev, const XGL_PEER_MEMORY_OPEN_INFO &info)
{
    DERIVED_OBJECT_INIT(xglOpenPeerMemory, dev.obj(), &info);
}

void GpuMemory::set_priority(XGL_MEMORY_PRIORITY priority)
{
    EXPECT(xglSetMemoryPriority(obj(), priority) == XGL_SUCCESS);
}

const void *GpuMemory::map(XGL_FLAGS flags) const
{
    void *data;
    if (!EXPECT(xglMapMemory(obj(), flags, &data) == XGL_SUCCESS))
        data = NULL;

    return data;
}

void *GpuMemory::map(XGL_FLAGS flags)
{
    void *data;
    if (!EXPECT(xglMapMemory(obj(), flags, &data) == XGL_SUCCESS))
        data = NULL;

    return data;
}

void GpuMemory::unmap() const
{
    EXPECT(xglUnmapMemory(obj()) == XGL_SUCCESS);
}

void Fence::init(const Device &dev, const XGL_FENCE_CREATE_INFO &info)
{
    DERIVED_OBJECT_INIT(xglCreateFence, dev.obj(), &info);
    alloc_memory(dev);
}

void QueueSemaphore::init(const Device &dev, const XGL_QUEUE_SEMAPHORE_CREATE_INFO &info)
{
    DERIVED_OBJECT_INIT(xglCreateQueueSemaphore, dev.obj(), &info);
    alloc_memory(dev);
}

void QueueSemaphore::init(const Device &dev, const XGL_QUEUE_SEMAPHORE_OPEN_INFO &info)
{
    DERIVED_OBJECT_INIT(xglOpenSharedQueueSemaphore, dev.obj(), &info);
}

void Event::init(const Device &dev, const XGL_EVENT_CREATE_INFO &info)
{
    DERIVED_OBJECT_INIT(xglCreateEvent, dev.obj(), &info);
    alloc_memory(dev);
}

void Event::set()
{
    EXPECT(xglSetEvent(obj()) == XGL_SUCCESS);
}

void Event::reset()
{
    EXPECT(xglResetEvent(obj()) == XGL_SUCCESS);
}

void QueryPool::init(const Device &dev, const XGL_QUERY_POOL_CREATE_INFO &info)
{
    DERIVED_OBJECT_INIT(xglCreateQueryPool, dev.obj(), &info);
    alloc_memory(dev);
}

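// Returns XGL_SUCCESS with data filled in, or XGL_NOT_READY if the queries
// have not completed yet; any other result is reported as a failure.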
XGL_RESULT QueryPool::results(uint32_t start, uint32_t count, size_t size, void *data)
{
    size_t tmp = size;
    XGL_RESULT err = xglGetQueryPoolResults(obj(), start, count, &tmp, data);
    if (err == XGL_SUCCESS) {
        if (!EXPECT(tmp == size))
            memset(data, 0, size);
    } else {
        EXPECT(err == XGL_NOT_READY);
    }

    return err;
}

void Buffer::init(const Device &dev, const XGL_BUFFER_CREATE_INFO &info)
{
    init_no_mem(dev, info);
    alloc_memory(dev);
}

void Buffer::init_no_mem(const Device &dev, const XGL_BUFFER_CREATE_INFO &info)
{
    DERIVED_OBJECT_INIT(xglCreateBuffer, dev.obj(), &info);
    create_info_ = info;
}

void BufferView::init(const Device &dev, const XGL_BUFFER_VIEW_CREATE_INFO &info)
{
    DERIVED_OBJECT_INIT(xglCreateBufferView, dev.obj(), &info);
    alloc_memory(dev);
}

void Image::init(const Device &dev, const XGL_IMAGE_CREATE_INFO &info)
{
    init_no_mem(dev, info);
    alloc_memory(dev, info.tiling == XGL_LINEAR_TILING);
}

void Image::init_no_mem(const Device &dev, const XGL_IMAGE_CREATE_INFO &info)
{
    DERIVED_OBJECT_INIT(xglCreateImage, dev.obj(), &info);
    init_info(dev, info);
}

void Image::init(const Device &dev, const XGL_PEER_IMAGE_OPEN_INFO &info, const XGL_IMAGE_CREATE_INFO &original_info)
{
    XGL_IMAGE img;
    XGL_GPU_MEMORY mem;
    EXPECT(xglOpenPeerImage(dev.obj(), &info, &img, &mem) == XGL_SUCCESS);
    Object::init(img);

    init_info(dev, original_info);
    alloc_memory(std::vector<XGL_GPU_MEMORY>(1, mem));
}

void Image::init_info(const Device &dev, const XGL_IMAGE_CREATE_INFO &info)
{
    create_info_ = info;

    for (std::vector<Device::Format>::const_iterator it = dev.formats().begin(); it != dev.formats().end(); it++) {
        if (memcmp(&it->format, &create_info_.format, sizeof(it->format)) == 0 && it->tiling == create_info_.tiling) {
            format_features_ = it->features;
            break;
        }
    }
}

void Image::bind_memory(uint32_t alloc_idx, const XGL_IMAGE_MEMORY_BIND_INFO &info,
                        const GpuMemory &mem, XGL_GPU_SIZE mem_offset)
{
    EXPECT(!alloc_idx && xglBindImageMemoryRange(obj(), &info, mem.obj(), mem_offset) == XGL_SUCCESS);
}

XGL_SUBRESOURCE_LAYOUT Image::subresource_layout(const XGL_IMAGE_SUBRESOURCE &subres) const
{
    const XGL_SUBRESOURCE_INFO_TYPE type = XGL_INFO_TYPE_SUBRESOURCE_LAYOUT;
    XGL_SUBRESOURCE_LAYOUT data;
    size_t size = sizeof(data);
    if (!EXPECT(xglGetImageSubresourceInfo(obj(), &subres, type, &size, &data) == XGL_SUCCESS && size == sizeof(data)))
        memset(&data, 0, sizeof(data));

    return data;
}

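// An image is "transparent" when its memory layout is directly readable by
// the CPU: linearly tiled, single-sampled, and not usable as a color or
// depth/stencil attachment.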
bool Image::transparent() const
{
    return (create_info_.tiling == XGL_LINEAR_TILING &&
            create_info_.samples == 1 &&
            !(create_info_.usage & (XGL_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                    XGL_IMAGE_USAGE_DEPTH_STENCIL_BIT)));
}

void ImageView::init(const Device &dev, const XGL_IMAGE_VIEW_CREATE_INFO &info)
{
    DERIVED_OBJECT_INIT(xglCreateImageView, dev.obj(), &info);
    alloc_memory(dev);
}

void ColorAttachmentView::init(const Device &dev, const XGL_COLOR_ATTACHMENT_VIEW_CREATE_INFO &info)
{
    DERIVED_OBJECT_INIT(xglCreateColorAttachmentView, dev.obj(), &info);
    alloc_memory(dev);
}

void DepthStencilView::init(const Device &dev, const XGL_DEPTH_STENCIL_VIEW_CREATE_INFO &info)
{
    DERIVED_OBJECT_INIT(xglCreateDepthStencilView, dev.obj(), &info);
    alloc_memory(dev);
}

void Shader::init(const Device &dev, const XGL_SHADER_CREATE_INFO &info)
{
    DERIVED_OBJECT_INIT(xglCreateShader, dev.obj(), &info);
}

XGL_RESULT Shader::init_try(const Device &dev, const XGL_SHADER_CREATE_INFO &info)
{
    XGL_SHADER sh;
    XGL_RESULT err = xglCreateShader(dev.obj(), &info, &sh);
    if (err == XGL_SUCCESS)
        Object::init(sh);

    return err;
}

void Pipeline::init(const Device &dev, const XGL_GRAPHICS_PIPELINE_CREATE_INFO &info)
{
    DERIVED_OBJECT_INIT(xglCreateGraphicsPipeline, dev.obj(), &info);
    alloc_memory(dev);
}

void Pipeline::init(const Device &dev, const XGL_COMPUTE_PIPELINE_CREATE_INFO &info)
{
    DERIVED_OBJECT_INIT(xglCreateComputePipeline, dev.obj(), &info);
    alloc_memory(dev);
}

void Pipeline::init(const Device &dev, size_t size, const void *data)
{
    DERIVED_OBJECT_INIT(xglLoadPipeline, dev.obj(), size, data);
    alloc_memory(dev);
}

size_t Pipeline::store(size_t size, void *data)
{
    if (!EXPECT(xglStorePipeline(obj(), &size, data) == XGL_SUCCESS))
        size = 0;

    return size;
}

void PipelineDelta::init(const Device &dev, const Pipeline &p1, const Pipeline &p2)
{
    DERIVED_OBJECT_INIT(xglCreatePipelineDelta, dev.obj(), p1.obj(), p2.obj());
    alloc_memory(dev);
}

void Sampler::init(const Device &dev, const XGL_SAMPLER_CREATE_INFO &info)
{
    DERIVED_OBJECT_INIT(xglCreateSampler, dev.obj(), &info);
    alloc_memory(dev);
}

void DescriptorSet::init(const Device &dev, const XGL_DESCRIPTOR_SET_CREATE_INFO &info)
{
    DERIVED_OBJECT_INIT(xglCreateDescriptorSet, dev.obj(), &info);
    info_ = info;
}

void DescriptorSet::attach(uint32_t start_slot, const std::vector<const Sampler *> &samplers)
{
    const std::vector<XGL_SAMPLER> sampler_objs = make_objects<XGL_SAMPLER>(samplers);
    xglAttachSamplerDescriptors(obj(), start_slot, sampler_objs.size(), &sampler_objs[0]);
}

void DescriptorSet::attach(uint32_t start_slot, const std::vector<XGL_IMAGE_VIEW_ATTACH_INFO> &img_views)
{
    xglAttachImageViewDescriptors(obj(), start_slot, img_views.size(), &img_views[0]);
}

void DescriptorSet::attach(uint32_t start_slot, const std::vector<XGL_BUFFER_VIEW_ATTACH_INFO> &buf_views)
{
    xglAttachBufferViewDescriptors(obj(), start_slot, buf_views.size(), &buf_views[0]);
}

void DescriptorSet::attach(uint32_t start_slot, const std::vector<XGL_DESCRIPTOR_SET_ATTACH_INFO> &sets)
{
    xglAttachNestedDescriptors(obj(), start_slot, sets.size(), &sets[0]);
}

void DynamicVpStateObject::init(const Device &dev, const XGL_VIEWPORT_STATE_CREATE_INFO &info)
{
    DERIVED_OBJECT_INIT(xglCreateViewportState, dev.obj(), &info);
    alloc_memory(dev);
}

void DynamicRsStateObject::init(const Device &dev, const XGL_RASTER_STATE_CREATE_INFO &info)
{
    DERIVED_OBJECT_INIT(xglCreateRasterState, dev.obj(), &info);
    alloc_memory(dev);
}

void DynamicMsaaStateObject::init(const Device &dev, const XGL_MSAA_STATE_CREATE_INFO &info)
{
    DERIVED_OBJECT_INIT(xglCreateMsaaState, dev.obj(), &info);
    alloc_memory(dev);
}

void DynamicCbStateObject::init(const Device &dev, const XGL_COLOR_BLEND_STATE_CREATE_INFO &info)
{
    DERIVED_OBJECT_INIT(xglCreateColorBlendState, dev.obj(), &info);
    alloc_memory(dev);
}

void DynamicDsStateObject::init(const Device &dev, const XGL_DEPTH_STENCIL_STATE_CREATE_INFO &info)
{
    DERIVED_OBJECT_INIT(xglCreateDepthStencilState, dev.obj(), &info);
    alloc_memory(dev);
}

void CmdBuffer::init(const Device &dev, const XGL_CMD_BUFFER_CREATE_INFO &info)
{
    DERIVED_OBJECT_INIT(xglCreateCommandBuffer, dev.obj(), &info);
}

void CmdBuffer::begin(const XGL_CMD_BUFFER_BEGIN_INFO *info)
{
    EXPECT(xglBeginCommandBuffer(obj(), info) == XGL_SUCCESS);
}

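// Convenience overload: begins recording with the given render pass
// attached via XGL_CMD_BUFFER_GRAPHICS_BEGIN_INFO for a full BEGIN_AND_END
// operation.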
void CmdBuffer::begin(XGL_RENDER_PASS renderpass_obj)
{
    XGL_CMD_BUFFER_BEGIN_INFO info = {};
    XGL_CMD_BUFFER_GRAPHICS_BEGIN_INFO graphics_cmd_buf_info = {
        .sType = XGL_STRUCTURE_TYPE_CMD_BUFFER_GRAPHICS_BEGIN_INFO,
        .pNext = NULL,
        .renderPass = renderpass_obj,
        .operation = XGL_RENDER_PASS_OPERATION_BEGIN_AND_END,
    };
    info.flags = XGL_CMD_BUFFER_OPTIMIZE_GPU_SMALL_BATCH_BIT |
                 XGL_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT;
    info.sType = XGL_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO;
    info.pNext = &graphics_cmd_buf_info;

    begin(&info);
}

void CmdBuffer::begin()
{
    XGL_CMD_BUFFER_BEGIN_INFO info = {};
    info.flags = XGL_CMD_BUFFER_OPTIMIZE_GPU_SMALL_BATCH_BIT |
                 XGL_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT;
    info.sType = XGL_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO;

    begin(&info);
}

void CmdBuffer::end()
{
    EXPECT(xglEndCommandBuffer(obj()) == XGL_SUCCESS);
}

void CmdBuffer::reset()
{
    EXPECT(xglResetCommandBuffer(obj()) == XGL_SUCCESS);
}

} // namespace xgl_testing