/*
 * Copyright 2016 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>
#include <xf86drm.h>

#ifdef __ANDROID__
#include <cutils/log.h>
#include <libgen.h>
#endif

#include "drv_helpers.h"
#include "drv_priv.h"
#include "util.h"

#ifdef DRV_AMDGPU
extern const struct backend backend_amdgpu;
#endif
#ifdef DRV_I915
extern const struct backend backend_i915;
#endif
#ifdef DRV_MSM
extern const struct backend backend_msm;
#endif
#ifdef DRV_VC4
extern const struct backend backend_vc4;
#endif

// Dumb / generic drivers
extern const struct backend backend_evdi;
extern const struct backend backend_marvell;
extern const struct backend backend_mediatek;
extern const struct backend backend_meson;
extern const struct backend backend_nouveau;
extern const struct backend backend_komeda;
extern const struct backend backend_radeon;
extern const struct backend backend_rockchip;
extern const struct backend backend_sun4i_drm;
extern const struct backend backend_synaptics;
extern const struct backend backend_virtgpu;
extern const struct backend backend_udl;
extern const struct backend backend_vkms;

static const struct backend *drv_get_backend(int fd)
{
	drmVersionPtr drm_version;
	unsigned int i;

	drm_version = drmGetVersion(fd);

	if (!drm_version)
		return NULL;

	const struct backend *backend_list[] = {
#ifdef DRV_AMDGPU
		&backend_amdgpu,
#endif
#ifdef DRV_I915
		&backend_i915,
#endif
#ifdef DRV_MSM
		&backend_msm,
#endif
#ifdef DRV_VC4
		&backend_vc4,
#endif
		&backend_evdi, &backend_komeda, &backend_marvell, &backend_mediatek,
		&backend_meson, &backend_nouveau, &backend_radeon, &backend_rockchip,
		&backend_sun4i_drm, &backend_synaptics, &backend_udl, &backend_virtgpu,
		&backend_vkms
	};

	for (i = 0; i < ARRAY_SIZE(backend_list); i++) {
		const struct backend *b = backend_list[i];
		if (!strcmp(drm_version->name, b->name)) {
			drmFreeVersion(drm_version);
			return b;
		}
	}

	drmFreeVersion(drm_version);
	return NULL;
}

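/*
 * Illustrative lifecycle sketch (not part of the original comments): the
 * device path, format, and use flags are example values only, the struct
 * rectangle initializer assumes its usual x/y/width/height fields, and error
 * handling is omitted for brevity.
 *
 *	int fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
 *	struct driver *drv = drv_create(fd);
 *	struct bo *bo = drv_bo_create(drv, 256, 256, DRM_FORMAT_XRGB8888,
 *				      BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN);
 *	struct mapping *map_data;
 *	struct rectangle rect = { .x = 0, .y = 0, .width = 256, .height = 256 };
 *	void *addr = drv_bo_map(bo, &rect, BO_MAP_READ_WRITE, &map_data, 0);
 *	memset(addr, 0, drv_bo_get_plane_size(bo, 0));
 *	drv_bo_flush(bo, map_data);
 *	drv_bo_unmap(bo, map_data);
 *	drv_bo_destroy(bo);
 *	drv_destroy(drv);
 *	close(fd);
 */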
struct driver *drv_create(int fd)
{
	struct driver *drv;
	int ret;

	drv = (struct driver *)calloc(1, sizeof(*drv));

	if (!drv)
		return NULL;

	char *minigbm_debug;
	minigbm_debug = getenv("MINIGBM_DEBUG");
	drv->compression = (minigbm_debug == NULL) || (strcmp(minigbm_debug, "nocompression") != 0);

	drv->fd = fd;
	drv->backend = drv_get_backend(fd);

	if (!drv->backend)
		goto free_driver;

	if (pthread_mutex_init(&drv->buffer_table_lock, NULL))
		goto free_driver;

	drv->buffer_table = drmHashCreate();
	if (!drv->buffer_table)
		goto free_buffer_table_lock;

	if (pthread_mutex_init(&drv->mappings_lock, NULL))
		goto free_buffer_table;

	drv->mappings = drv_array_init(sizeof(struct mapping));
	if (!drv->mappings)
		goto free_mappings_lock;

	drv->combos = drv_array_init(sizeof(struct combination));
	if (!drv->combos)
		goto free_mappings;

	if (drv->backend->init) {
		ret = drv->backend->init(drv);
		if (ret) {
			drv_array_destroy(drv->combos);
			goto free_mappings;
		}
	}

	return drv;

free_mappings:
	drv_array_destroy(drv->mappings);
free_mappings_lock:
	pthread_mutex_destroy(&drv->mappings_lock);
free_buffer_table:
	drmHashDestroy(drv->buffer_table);
free_buffer_table_lock:
	pthread_mutex_destroy(&drv->buffer_table_lock);
free_driver:
	free(drv);
	return NULL;
}

void drv_destroy(struct driver *drv)
{
	if (drv->backend->close)
		drv->backend->close(drv);

	drv_array_destroy(drv->combos);

	drv_array_destroy(drv->mappings);
	pthread_mutex_destroy(&drv->mappings_lock);

	drmHashDestroy(drv->buffer_table);
	pthread_mutex_destroy(&drv->buffer_table_lock);

	free(drv);
}

int drv_get_fd(struct driver *drv)
{
	return drv->fd;
}

const char *drv_get_name(struct driver *drv)
{
	return drv->backend->name;
}

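/*
 * Return the highest-priority combination advertised by the backend that
 * matches the format and supports every requested use flag, or NULL if no
 * such combination exists.
 */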
struct combination *drv_get_combination(struct driver *drv, uint32_t format, uint64_t use_flags)
{
	struct combination *curr, *best;

	if (format == DRM_FORMAT_NONE || use_flags == BO_USE_NONE)
		return 0;

	best = NULL;
	uint32_t i;
	for (i = 0; i < drv_array_size(drv->combos); i++) {
		curr = drv_array_at_idx(drv->combos, i);
		if ((format == curr->format) && use_flags == (curr->use_flags & use_flags))
			if (!best || best->metadata.priority < curr->metadata.priority)
				best = curr;
	}

	return best;
}

struct bo *drv_bo_new(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
		      uint64_t use_flags, bool is_test_buffer)
{

	struct bo *bo;
	bo = (struct bo *)calloc(1, sizeof(*bo));

	if (!bo)
		return NULL;

	bo->drv = drv;
	bo->meta.width = width;
	bo->meta.height = height;
	bo->meta.format = format;
	bo->meta.use_flags = use_flags;
	bo->meta.num_planes = drv_num_planes_from_format(format);
	bo->is_test_buffer = is_test_buffer;

	if (!bo->meta.num_planes) {
		free(bo);
		errno = EINVAL;
		return NULL;
	}

	return bo;
}

static void drv_bo_mapping_destroy(struct bo *bo)
{
	struct driver *drv = bo->drv;
	uint32_t idx = 0;

	/*
	 * This function is called right before the buffer is destroyed. It will free any mappings
	 * associated with the buffer.
	 */
	pthread_mutex_lock(&drv->mappings_lock);
	for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
		while (idx < drv_array_size(drv->mappings)) {
			struct mapping *mapping =
			    (struct mapping *)drv_array_at_idx(drv->mappings, idx);
			if (mapping->vma->handle != bo->handles[plane].u32) {
				idx++;
				continue;
			}

			if (!--mapping->vma->refcount) {
				int ret = drv->backend->bo_unmap(bo, mapping->vma);
				if (ret) {
					pthread_mutex_unlock(&drv->mappings_lock);
					assert(ret);
					drv_loge("munmap failed\n");
					return;
				}

				free(mapping->vma);
			}

			/* This shrinks and shifts the array, so don't increment idx. */
			drv_array_remove(drv->mappings, idx);
		}
	}
	pthread_mutex_unlock(&drv->mappings_lock);
}

/*
 * Acquire a reference on plane buffers of the bo.
 */
static void drv_bo_acquire(struct bo *bo)
{
	struct driver *drv = bo->drv;

	pthread_mutex_lock(&drv->buffer_table_lock);
	for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
		uintptr_t num = 0;

		if (!drmHashLookup(drv->buffer_table, bo->handles[plane].u32, (void **)&num))
			drmHashDelete(drv->buffer_table, bo->handles[plane].u32);

		drmHashInsert(drv->buffer_table, bo->handles[plane].u32, (void *)(num + 1));
	}
	pthread_mutex_unlock(&drv->buffer_table_lock);
}

/*
 * Release a reference on plane buffers of the bo. Return true when the bo has lost all its
 * references. Otherwise, return false.
 */
static bool drv_bo_release(struct bo *bo)
{
	struct driver *drv = bo->drv;
	uintptr_t num;

	if (drv->backend->bo_release)
		drv->backend->bo_release(bo);

	pthread_mutex_lock(&drv->buffer_table_lock);
	for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
		if (!drmHashLookup(drv->buffer_table, bo->handles[plane].u32, (void **)&num)) {
			drmHashDelete(drv->buffer_table, bo->handles[plane].u32);

			if (num > 1) {
				drmHashInsert(drv->buffer_table, bo->handles[plane].u32,
					      (void *)(num - 1));
			}
		}
	}

	/* The same buffer can back multiple planes with different offsets. */
	for (size_t plane = 0; plane < bo->meta.num_planes; plane++) {
		if (!drmHashLookup(drv->buffer_table, bo->handles[plane].u32, (void **)&num)) {
			/* num is positive if found in the hashmap. */
			pthread_mutex_unlock(&drv->buffer_table_lock);
			return false;
		}
	}
	pthread_mutex_unlock(&drv->buffer_table_lock);

	return true;
}

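/*
 * Allocate a buffer object. When BO_USE_TEST_ALLOC is set, only the backend's
 * metadata computation runs (if the backend provides one) to probe whether the
 * parameters are supported; no buffer is actually created.
 */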
struct bo *drv_bo_create(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
			 uint64_t use_flags)
{
	int ret;
	struct bo *bo;
	bool is_test_alloc;

	is_test_alloc = use_flags & BO_USE_TEST_ALLOC;
	use_flags &= ~BO_USE_TEST_ALLOC;

	bo = drv_bo_new(drv, width, height, format, use_flags, is_test_alloc);

	if (!bo)
		return NULL;

	ret = -EINVAL;
	if (drv->backend->bo_compute_metadata) {
		ret = drv->backend->bo_compute_metadata(bo, width, height, format, use_flags, NULL,
							0);
		if (!is_test_alloc && ret == 0)
			ret = drv->backend->bo_create_from_metadata(bo);
	} else if (!is_test_alloc) {
		ret = drv->backend->bo_create(bo, width, height, format, use_flags);
	}

	if (ret) {
		errno = -ret;
		free(bo);
		return NULL;
	}

	drv_bo_acquire(bo);

	return bo;
}

struct bo *drv_bo_create_with_modifiers(struct driver *drv, uint32_t width, uint32_t height,
					uint32_t format, const uint64_t *modifiers, uint32_t count)
{
	int ret;
	struct bo *bo;

	if (!drv->backend->bo_create_with_modifiers && !drv->backend->bo_compute_metadata) {
		errno = ENOENT;
		return NULL;
	}

	bo = drv_bo_new(drv, width, height, format, BO_USE_NONE, false);

	if (!bo)
		return NULL;

	ret = -EINVAL;
	if (drv->backend->bo_compute_metadata) {
		ret = drv->backend->bo_compute_metadata(bo, width, height, format, BO_USE_NONE,
							modifiers, count);
		if (ret == 0)
			ret = drv->backend->bo_create_from_metadata(bo);
	} else {
		ret = drv->backend->bo_create_with_modifiers(bo, width, height, format, modifiers,
							     count);
	}

	if (ret) {
		free(bo);
		return NULL;
	}

	drv_bo_acquire(bo);

	return bo;
}

void drv_bo_destroy(struct bo *bo)
{
	if (!bo->is_test_buffer && drv_bo_release(bo)) {
		drv_bo_mapping_destroy(bo);
		bo->drv->backend->bo_destroy(bo);
	}

	free(bo);
}

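/*
 * Wrap externally-provided buffer fds (e.g. dma-bufs shared by another
 * process) in a new bo. Per-plane sizes are derived from the plane offsets
 * and the fd size reported by lseek().
 */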
struct bo *drv_bo_import(struct driver *drv, struct drv_import_fd_data *data)
{
	int ret;
	size_t plane;
	struct bo *bo;
	off_t seek_end;

	bo = drv_bo_new(drv, data->width, data->height, data->format, data->use_flags, false);

	if (!bo)
		return NULL;

	ret = drv->backend->bo_import(bo, data);
	if (ret) {
		free(bo);
		return NULL;
	}

	drv_bo_acquire(bo);

	bo->meta.format_modifier = data->format_modifier;
	for (plane = 0; plane < bo->meta.num_planes; plane++) {
		bo->meta.strides[plane] = data->strides[plane];
		bo->meta.offsets[plane] = data->offsets[plane];

		seek_end = lseek(data->fds[plane], 0, SEEK_END);
		if (seek_end == (off_t)(-1)) {
			drv_loge("lseek() failed with %s\n", strerror(errno));
			goto destroy_bo;
		}

		lseek(data->fds[plane], 0, SEEK_SET);
		if (plane == bo->meta.num_planes - 1 || data->offsets[plane + 1] == 0)
			bo->meta.sizes[plane] = seek_end - data->offsets[plane];
		else
			bo->meta.sizes[plane] = data->offsets[plane + 1] - data->offsets[plane];

		if ((int64_t)bo->meta.offsets[plane] + bo->meta.sizes[plane] > seek_end) {
			drv_loge("buffer size is too large.\n");
			goto destroy_bo;
		}

		bo->meta.total_size += bo->meta.sizes[plane];
	}

	return bo;

destroy_bo:
	drv_bo_destroy(bo);
	return NULL;
}

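/*
 * Map one plane of a buffer for CPU access. Mappings are cached: a request
 * that exactly matches an existing mapping's rectangle and flags only bumps
 * its refcount, a request with the same handle and flags shares the existing
 * vma, and anything else maps a new vma through the backend. Returns
 * MAP_FAILED on failure.
 */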
void *drv_bo_map(struct bo *bo, const struct rectangle *rect, uint32_t map_flags,
		 struct mapping **map_data, size_t plane)
{
	struct driver *drv = bo->drv;
	uint32_t i;
	uint8_t *addr;
	struct mapping mapping = { 0 };

	assert(rect->width >= 0);
	assert(rect->height >= 0);
	assert(rect->x + rect->width <= drv_bo_get_width(bo));
	assert(rect->y + rect->height <= drv_bo_get_height(bo));
	assert(BO_MAP_READ_WRITE & map_flags);
	/* No CPU access for protected buffers. */
	assert(!(bo->meta.use_flags & BO_USE_PROTECTED));

	if (bo->is_test_buffer)
		return MAP_FAILED;

	mapping.rect = *rect;
	mapping.refcount = 1;

	pthread_mutex_lock(&drv->mappings_lock);

	for (i = 0; i < drv_array_size(drv->mappings); i++) {
		struct mapping *prior = (struct mapping *)drv_array_at_idx(drv->mappings, i);
		if (prior->vma->handle != bo->handles[plane].u32 ||
		    prior->vma->map_flags != map_flags)
			continue;

		if (rect->x != prior->rect.x || rect->y != prior->rect.y ||
		    rect->width != prior->rect.width || rect->height != prior->rect.height)
			continue;

		prior->refcount++;
		*map_data = prior;
		goto exact_match;
	}

	for (i = 0; i < drv_array_size(drv->mappings); i++) {
		struct mapping *prior = (struct mapping *)drv_array_at_idx(drv->mappings, i);
		if (prior->vma->handle != bo->handles[plane].u32 ||
		    prior->vma->map_flags != map_flags)
			continue;

		prior->vma->refcount++;
		mapping.vma = prior->vma;
		goto success;
	}

	mapping.vma = calloc(1, sizeof(*mapping.vma));
	if (!mapping.vma) {
		*map_data = NULL;
		pthread_mutex_unlock(&drv->mappings_lock);
		return MAP_FAILED;
	}

	memcpy(mapping.vma->map_strides, bo->meta.strides, sizeof(mapping.vma->map_strides));
	addr = drv->backend->bo_map(bo, mapping.vma, plane, map_flags);
	if (addr == MAP_FAILED) {
		*map_data = NULL;
		free(mapping.vma);
		pthread_mutex_unlock(&drv->mappings_lock);
		return MAP_FAILED;
	}

	mapping.vma->refcount = 1;
	mapping.vma->addr = addr;
	mapping.vma->handle = bo->handles[plane].u32;
	mapping.vma->map_flags = map_flags;

success:
	*map_data = drv_array_append(drv->mappings, &mapping);
exact_match:
	drv_bo_invalidate(bo, *map_data);
	addr = (uint8_t *)((*map_data)->vma->addr);
	addr += drv_bo_get_plane_offset(bo, plane);
	pthread_mutex_unlock(&drv->mappings_lock);
	return (void *)addr;
}

int drv_bo_unmap(struct bo *bo, struct mapping *mapping)
{
	struct driver *drv = bo->drv;
	uint32_t i;
	int ret = 0;

	pthread_mutex_lock(&drv->mappings_lock);

	if (--mapping->refcount)
		goto out;

	if (!--mapping->vma->refcount) {
		ret = drv->backend->bo_unmap(bo, mapping->vma);
		free(mapping->vma);
	}

	for (i = 0; i < drv_array_size(drv->mappings); i++) {
		if (mapping == (struct mapping *)drv_array_at_idx(drv->mappings, i)) {
			drv_array_remove(drv->mappings, i);
			break;
		}
	}

out:
	pthread_mutex_unlock(&drv->mappings_lock);
	return ret;
}

int drv_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
	int ret = 0;

	assert(mapping);
	assert(mapping->vma);
	assert(mapping->refcount > 0);
	assert(mapping->vma->refcount > 0);

	if (bo->drv->backend->bo_invalidate)
		ret = bo->drv->backend->bo_invalidate(bo, mapping);

	return ret;
}

int drv_bo_flush(struct bo *bo, struct mapping *mapping)
{
	int ret = 0;

	assert(mapping);
	assert(mapping->vma);
	assert(mapping->refcount > 0);
	assert(mapping->vma->refcount > 0);

	if (bo->drv->backend->bo_flush)
		ret = bo->drv->backend->bo_flush(bo, mapping);

	return ret;
}

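/*
 * Flush the mapping when the backend needs explicit flushes; otherwise drop
 * the CPU mapping entirely via drv_bo_unmap().
 */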
int drv_bo_flush_or_unmap(struct bo *bo, struct mapping *mapping)
{
	int ret = 0;

	assert(mapping);
	assert(mapping->vma);
	assert(mapping->refcount > 0);
	assert(mapping->vma->refcount > 0);
	assert(!(bo->meta.use_flags & BO_USE_PROTECTED));

	if (bo->drv->backend->bo_flush)
		ret = bo->drv->backend->bo_flush(bo, mapping);
	else
		ret = drv_bo_unmap(bo, mapping);

	return ret;
}

uint32_t drv_bo_get_width(struct bo *bo)
{
	return bo->meta.width;
}

uint32_t drv_bo_get_height(struct bo *bo)
{
	return bo->meta.height;
}

size_t drv_bo_get_num_planes(struct bo *bo)
{
	return bo->meta.num_planes;
}

union bo_handle drv_bo_get_plane_handle(struct bo *bo, size_t plane)
{
	return bo->handles[plane];
}

#ifndef DRM_RDWR
#define DRM_RDWR O_RDWR
#endif

int drv_bo_get_plane_fd(struct bo *bo, size_t plane)
{

	int ret, fd;
	assert(plane < bo->meta.num_planes);

	if (bo->is_test_buffer)
		return -EINVAL;

	ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32, DRM_CLOEXEC | DRM_RDWR, &fd);

	// Older DRM implementations blocked DRM_RDWR, but gave a read/write mapping anyway
	if (ret)
		ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32, DRM_CLOEXEC, &fd);

	if (ret)
		drv_loge("Failed to get plane fd: %s\n", strerror(errno));

	return (ret) ? ret : fd;
}

uint32_t drv_bo_get_plane_offset(struct bo *bo, size_t plane)
{
	assert(plane < bo->meta.num_planes);
	return bo->meta.offsets[plane];
}

uint32_t drv_bo_get_plane_size(struct bo *bo, size_t plane)
{
	assert(plane < bo->meta.num_planes);
	return bo->meta.sizes[plane];
}

uint32_t drv_bo_get_plane_stride(struct bo *bo, size_t plane)
{
	assert(plane < bo->meta.num_planes);
	return bo->meta.strides[plane];
}

uint64_t drv_bo_get_format_modifier(struct bo *bo)
{
	return bo->meta.format_modifier;
}

uint32_t drv_bo_get_format(struct bo *bo)
{
	return bo->meta.format;
}

uint32_t drv_bo_get_tiling(struct bo *bo)
{
	return bo->meta.tiling;
}

uint64_t drv_bo_get_use_flags(struct bo *bo)
{
	return bo->meta.use_flags;
}

size_t drv_bo_get_total_size(struct bo *bo)
{
	return bo->meta.total_size;
}

/*
 * Map internal fourcc codes back to standard fourcc codes.
 */
uint32_t drv_get_standard_fourcc(uint32_t fourcc_internal)
{
	return (fourcc_internal == DRM_FORMAT_YVU420_ANDROID) ? DRM_FORMAT_YVU420 : fourcc_internal;
}

void drv_resolve_format_and_use_flags(struct driver *drv, uint32_t format, uint64_t use_flags,
				      uint32_t *out_format, uint64_t *out_use_flags)
{
	assert(drv->backend->resolve_format_and_use_flags);

	drv->backend->resolve_format_and_use_flags(drv, format, use_flags, out_format,
						   out_use_flags);
}

uint32_t drv_num_buffers_per_bo(struct bo *bo)
{
	uint32_t count = 0;
	size_t plane, p;

	if (bo->is_test_buffer)
		return 0;

	for (plane = 0; plane < bo->meta.num_planes; plane++) {
		for (p = 0; p < plane; p++)
			if (bo->handles[p].u32 == bo->handles[plane].u32)
				break;
		if (p == plane)
			count++;
	}

	return count;
}

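/*
 * Common sink for the drv_log*() helpers (e.g. drv_loge()): prefixes the
 * message with the originating file and line, then routes it to the Android
 * log on __ANDROID__ builds or to stdout/stderr (stderr for DRV_LOGE)
 * elsewhere.
 */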
void drv_log_prefix(enum drv_log_level level, const char *prefix, const char *file, int line,
		    const char *format, ...)
{
	char buf[50];
	snprintf(buf, sizeof(buf), "[%s:%s(%d)]", prefix, basename(file), line);

	va_list args;
	va_start(args, format);
#ifdef __ANDROID__
	int prio = ANDROID_LOG_ERROR;
	switch (level) {
	case DRV_LOGV:
		prio = ANDROID_LOG_VERBOSE;
		break;
	case DRV_LOGD:
		prio = ANDROID_LOG_DEBUG;
		break;
	case DRV_LOGI:
		prio = ANDROID_LOG_INFO;
		break;
	case DRV_LOGE:
	default:
		break;
	};
	__android_log_vprint(prio, buf, format, args);
#else
	if (level == DRV_LOGE) {
		fprintf(stderr, "%s ", buf);
		vfprintf(stderr, format, args);
	} else {
		fprintf(stdout, "%s ", buf);
		vfprintf(stdout, format, args);
	}
#endif
	va_end(args);
}

int drv_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
		      uint32_t offsets[DRV_MAX_PLANES], uint64_t *format_modifier)
{
	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++) {
		strides[plane] = bo->meta.strides[plane];
		offsets[plane] = bo->meta.offsets[plane];
	}
	*format_modifier = bo->meta.format_modifier;

	if (bo->drv->backend->resource_info)
		return bo->drv->backend->resource_info(bo, strides, offsets, format_modifier);

	return 0;
}

uint32_t drv_get_max_texture_2d_size(struct driver *drv)
{
	if (drv->backend->get_max_texture_2d_size)
		return drv->backend->get_max_texture_2d_size(drv);

	return UINT32_MAX;
}