1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2021 Intel Corporation
4 */
5
6#include <sys/types.h>
7#include <sys/stat.h>
8#include <sys/ipc.h>
9#include <sys/msg.h>
10#include <fcntl.h>
11#include <pthread.h>
12#include <signal.h>
13#include <stdlib.h>
14#include <unistd.h>
15#include "igt.h"
16#include "igt_map.h"
17#include "intel_allocator.h"
18#include "intel_allocator_msgchannel.h"
19
20//#define ALLOCDBG
21#ifdef ALLOCDBG
22#define alloc_info igt_info
23#define alloc_debug igt_debug
24static const char *reqtype_str[] = {
25 [REQ_STOP] = "stop",
26 [REQ_OPEN] = "open",
27 [REQ_OPEN_AS] = "open as",
28 [REQ_CLOSE] = "close",
29 [REQ_ADDRESS_RANGE] = "address range",
30 [REQ_ALLOC] = "alloc",
31 [REQ_FREE] = "free",
32 [REQ_IS_ALLOCATED] = "is allocated",
33 [REQ_RESERVE] = "reserve",
34 [REQ_UNRESERVE] = "unreserve",
35 [REQ_RESERVE_IF_NOT_ALLOCATED] = "reserve-ina",
36 [REQ_IS_RESERVED] = "is reserved",
37};
38static inline const char *reqstr(enum reqtype request_type)
39{
40 igt_assert(request_type >= REQ_STOP && request_type <= REQ_IS_RESERVED);
41 return reqtype_str[request_type];
42}
43#else
44#define alloc_info(...) {}
45#define alloc_debug(...) {}
46#endif
47
48struct allocator {
49 int fd;
50 uint32_t ctx;
51 uint32_t vm;
52 _Atomic(int32_t) refcount;
53 struct intel_allocator *ial;
54};
55
56struct handle_entry {
57 uint64_t handle;
58 struct allocator *al;
59};
60
61struct intel_allocator *intel_allocator_reloc_create(int fd);
62struct intel_allocator *intel_allocator_random_create(int fd);
63struct intel_allocator *intel_allocator_simple_create(int fd);
64struct intel_allocator *
65intel_allocator_simple_create_full(int fd, uint64_t start, uint64_t end,
66 enum allocator_strategy strategy);
67
68/*
69 * Instead of trying to find the first empty handle, just get a new one.
70 * Assuming the counter is incremented 2^32 times per second (a 4 GHz clock
71 * with handle assignment taking a single cycle), the 64-bit counter would
72 * wrap around only after 2^32 seconds, i.e. ~136 years.
73 *
74 * allocator
75 * handles <fd, ctx> intel allocator
76 * +-----+ +--------+ +-------------+
77 * | 1 +---------->+ fd: 3 +--------->+ data: ... |
78 * +-----+ +---->+ ctx: 1 | | refcount: 2 |
79 * | 2 +-----+ | ref: 2 | +-------------+
80 * +-----+ +--------+
81 * | 3 +--+ +--------+ intel allocator
82 * +-----+ | | fd: 3 | +-------------+
83 * | ... | +------->| ctx: 2 +--------->+ data: ... |
84 * +-----+ | ref: 1 | | refcount: 1 |
85 * | n +--------+ +--------+ +-------------+
86 * +-----+ |
87 * | ... +-----+ | allocator
88 * +-----+ | | <fd, vm> intel allocator
89 * | ... +--+ | | +--------+ +-------------+
90 * + + | | +->+ fd: 3 +-----+--->+ data: ... |
91 * | +---->+ vm: 1 | | | refcount: 3 |
92 * | | ref: 2 | | +-------------+
93 * | +--------+ |
94 * | +--------+ |
95 * | | fd: 3 | |
96 * +------->+ vm: 2 +-----+
97 * | ref: 1 |
98 * +--------+
99 */
100static _Atomic(uint64_t) next_handle;
101static struct igt_map *handles;
102static struct igt_map *ctx_map;
103static struct igt_map *vm_map;
104static pthread_mutex_t map_mutex = PTHREAD_MUTEX_INITIALIZER;
105#define GET_MAP(vm) ((vm) ? vm_map : ctx_map)
106
107static bool multiprocess;
108static pthread_t allocator_thread;
109static bool allocator_thread_running;
110
111static bool warn_if_not_empty;
112
113/* For allocator purposes we need to track pid/tid */
114static pid_t allocator_pid = -1;
115extern pid_t child_pid;
116extern __thread pid_t child_tid;
117
118/*
119 * - in the parent process child_pid == -1
120 * - in a child which has called intel_allocator_init(), allocator_pid == child_pid
121 */
122static inline bool is_same_process(void)
123{
124 return child_pid == -1 || allocator_pid == child_pid;
125}
126
127static struct msg_channel *channel;
128
129static int send_alloc_stop(struct msg_channel *msgchan)
130{
131 struct alloc_req req = {0};
132
133 req.request_type = REQ_STOP;
134
135 return msgchan->send_req(msgchan, &req);
136}
137
138static int send_req(struct msg_channel *msgchan, pid_t tid,
139 struct alloc_req *request)
140{
141 request->tid = tid;
142 return msgchan->send_req(msgchan, request);
143}
144
145static int recv_req(struct msg_channel *msgchan, struct alloc_req *request)
146{
147 return msgchan->recv_req(msgchan, request);
148}
149
150static int send_resp(struct msg_channel *msgchan,
151 pid_t tid, struct alloc_resp *response)
152{
153 response->tid = tid;
154 return msgchan->send_resp(msgchan, response);
155}
156
157static int recv_resp(struct msg_channel *msgchan,
158 pid_t tid, struct alloc_resp *response)
159{
160 response->tid = tid;
161 return msgchan->recv_resp(msgchan, response);
162}
163
164static inline void map_entry_free_func(struct igt_map_entry *entry)
165{
166 free(entry->data);
167}
168
169static uint64_t __handle_create(struct allocator *al)
170{
171 struct handle_entry *h = malloc(sizeof(*h));
172
173 igt_assert(h);
174 h->handle = atomic_fetch_add(&next_handle, 1);
175 h->al = al;
176 igt_map_insert(handles, h, h);
177
178 return h->handle;
179}
180
181static void __handle_destroy(uint64_t handle)
182{
183 struct handle_entry he = { .handle = handle };
184
185 igt_map_remove(handles, &he, map_entry_free_func);
186}
187
188static struct allocator *__allocator_find(int fd, uint32_t ctx, uint32_t vm)
189{
190 struct allocator al = { .fd = fd, .ctx = ctx, .vm = vm };
191 struct igt_map *map = GET_MAP(vm);
192
193 return igt_map_search(map, &al);
194}
195
196static struct allocator *__allocator_find_by_handle(uint64_t handle)
197{
198 struct handle_entry *h, he = { .handle = handle };
199
200 h = igt_map_search(handles, &he);
201 if (!h)
202 return NULL;
203
204 return h->al;
205}
206
207static struct allocator *__allocator_create(int fd, uint32_t ctx, uint32_t vm,
208 struct intel_allocator *ial)
209{
210 struct igt_map *map = GET_MAP(vm);
211 struct allocator *al = malloc(sizeof(*al));
212
213 igt_assert(al);
214 igt_assert(fd == ial->fd);
215 al->fd = fd;
216 al->ctx = ctx;
217 al->vm = vm;
218 atomic_init(&al->refcount, 0);
219 al->ial = ial;
220
221 igt_map_insert(map, al, al);
222
223 return al;
224}
225
226static void __allocator_destroy(struct allocator *al)
227{
228 struct igt_map *map = GET_MAP(al->vm);
229
230 igt_map_remove(map, al, map_entry_free_func);
231}
232
233static int __allocator_get(struct allocator *al)
234{
235 struct intel_allocator *ial = al->ial;
236 int refcount;
237
238 atomic_fetch_add(&al->refcount, 1);
239 refcount = atomic_fetch_add(&ial->refcount, 1);
240 igt_assert(refcount >= 0);
241
242 return refcount;
243}
244
245static bool __allocator_put(struct allocator *al)
246{
247 struct intel_allocator *ial = al->ial;
248 bool released = false;
249 int refcount, al_refcount;
250
251 al_refcount = atomic_fetch_sub(&al->refcount, 1);
252 refcount = atomic_fetch_sub(&ial->refcount, 1);
253 igt_assert(refcount >= 1);
254 if (refcount == 1) {
255 if (!ial->is_empty(ial) && warn_if_not_empty)
256 igt_warn("Allocator not clear before destroy!\n");
257
258 /* Check that the allocator also has refcount == 1 */
259 igt_assert_eq(al_refcount, 1);
260
261 released = true;
262 }
263
264 return released;
265}
266
267static struct intel_allocator *intel_allocator_create(int fd,
268 uint64_t start, uint64_t end,
269 uint8_t allocator_type,
270 uint8_t allocator_strategy)
271{
272 struct intel_allocator *ial = NULL;
273
274 switch (allocator_type) {
275 /*
276 * A few words of explanation are required here.
277 *
278 * INTEL_ALLOCATOR_NONE lets the code (intel-bb is an example) record
279 * that the IGT allocator itself is not used and that we most likely
280 * rely on relocations.
281 * So trying to create a NONE allocator doesn't make sense and the
282 * assertion below catches such invalid usage.
283 */
284 case INTEL_ALLOCATOR_NONE:
285 igt_assert_f(allocator_type != INTEL_ALLOCATOR_NONE,
286 "We cannot use NONE allocator\n");
287 break;
288 case INTEL_ALLOCATOR_RELOC:
289 ial = intel_allocator_reloc_create(fd);
290 break;
291 case INTEL_ALLOCATOR_RANDOM:
292 ial = intel_allocator_random_create(fd);
293 break;
294 case INTEL_ALLOCATOR_SIMPLE:
295 if (!start && !end)
296 ial = intel_allocator_simple_create(fd);
297 else
298 ial = intel_allocator_simple_create_full(fd, start, end,
299 allocator_strategy);
300 break;
301 default:
302 igt_assert_f(ial, "Allocator type %d not implemented\n",
303 allocator_type);
304 break;
305 }
306
307 igt_assert(ial);
308
309 ial->type = allocator_type;
310 ial->strategy = allocator_strategy;
311 pthread_mutex_init(&ial->mutex, NULL);
312
313 return ial;
314}
315
316static void intel_allocator_destroy(struct intel_allocator *ial)
317{
318 alloc_info("Destroying allocator (empty: %d)\n", ial->is_empty(ial));
319
320 ial->destroy(ial);
321}
322
323static struct allocator *allocator_open(int fd, uint32_t ctx, uint32_t vm,
324 uint64_t start, uint64_t end,
325 uint8_t allocator_type,
326 uint8_t allocator_strategy,
327 uint64_t *ahndp)
328{
329 struct intel_allocator *ial;
330 struct allocator *al;
331 const char *idstr = vm ? "vm" : "ctx";
332
333 igt_assert(ahndp);
334
335 al = __allocator_find(fd, ctx, vm);
336 if (!al) {
337 alloc_info("Allocator fd: %d, ctx: %u, vm: %u, <0x%llx : 0x%llx> "
338 "not found, creating one\n",
339 fd, ctx, vm, (long long) start, (long long) end);
340 ial = intel_allocator_create(fd, start, end, allocator_type,
341 allocator_strategy);
342 al = __allocator_create(fd, ctx, vm, ial);
343 }
344
345 ial = al->ial;
346
347 igt_assert_f(ial->type == allocator_type,
348 "Allocator type must be same for fd/%s\n", idstr);
349
350 igt_assert_f(ial->strategy == allocator_strategy,
351 "Allocator strategy must be same or fd/%s\n", idstr);
352
353 __allocator_get(al);
354 *ahndp = __handle_create(al);
355
356 return al;
357}
358
359static struct allocator *allocator_open_as(struct allocator *base,
360 uint32_t new_vm, uint64_t *ahndp)
361{
362 struct allocator *al;
363
364 igt_assert(ahndp);
365 al = __allocator_create(base->fd, base->ctx, new_vm, base->ial);
366 __allocator_get(al);
367 *ahndp = __handle_create(al);
368
369 return al;
370}
371
372static bool allocator_close(uint64_t ahnd)
373{
374 struct allocator *al;
375 bool released, is_empty = false;
376
377 al = __allocator_find_by_handle(ahnd);
378 if (!al) {
379 igt_warn("Cannot find handle: %llx\n", (long long) ahnd);
380 return false;
381 }
382
383 released = __allocator_put(al);
384 if (released) {
385 is_empty = al->ial->is_empty(al->ial);
386 intel_allocator_destroy(al->ial);
387 }
388
389 if (!atomic_load(&al->refcount))
390 __allocator_destroy(al);
391
392 __handle_destroy(ahnd);
393
394 return is_empty;
395}
396
397static int send_req_recv_resp(struct msg_channel *msgchan,
398 struct alloc_req *request,
399 struct alloc_resp *response)
400{
401 int ret;
402
403 ret = send_req(msgchan, child_tid, request);
404 if (ret < 0) {
405 igt_warn("Error sending request [type: %d]: err = %d [%s]\n",
406 request->request_type, errno, strerror(errno));
407
408 return ret;
409 }
410
411 ret = recv_resp(msgchan, child_tid, response);
412 if (ret < 0)
413 igt_warn("Error receiving response [type: %d]: err = %d [%s]\n",
414 request->request_type, errno, strerror(errno));
415
416 /*
417 * The main assumption is that we receive a message whose size must be > 0.
418 * If this is fulfilled we return 0 as success.
419 */
420 if (ret > 0)
421 ret = 0;
422
423 return ret;
424}
425
426static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
427{
428 int ret;
429 long refcnt;
430
431 memset(resp, 0, sizeof(*resp));
432
433 if (is_same_process()) {
434 struct intel_allocator *ial;
435 struct allocator *al;
436 uint64_t start, end, size, ahnd;
437 uint32_t ctx, vm;
438 bool allocated, reserved, unreserved;
439 /* Only used when debug is on, so cast to avoid compilation warnings */
440 (void) ctx;
441 (void) vm;
442 (void) refcnt;
443
444 /*
445 * The mutex only protects a single allocator instance, not stop/open/close
446 */
447 if (req->request_type > REQ_CLOSE) {
448 /*
449 * We have to lock the map mutex because a concurrent open
450 * can lead to resizing the map.
451 */
452 pthread_mutex_lock(&map_mutex);
453 al = __allocator_find_by_handle(req->allocator_handle);
454 pthread_mutex_unlock(&map_mutex);
455 igt_assert(al);
456
457 ial = al->ial;
458 igt_assert(ial);
459 pthread_mutex_lock(&ial->mutex);
460 }
461
462 switch (req->request_type) {
463 case REQ_STOP:
464 alloc_info("<stop>\n");
465 break;
466
467 case REQ_OPEN:
468 pthread_mutex_lock(&map_mutex);
469 al = allocator_open(req->open.fd,
470 req->open.ctx, req->open.vm,
471 req->open.start, req->open.end,
472 req->open.allocator_type,
473 req->open.allocator_strategy,
474 &ahnd);
475 refcnt = atomic_load(&al->refcount);
476 ret = atomic_load(&al->ial->refcount);
477 pthread_mutex_unlock(&map_mutex);
478
479 resp->response_type = RESP_OPEN;
480 resp->open.allocator_handle = ahnd;
481
482 alloc_info("<open> [tid: %ld] fd: %d, ahnd: %" PRIx64
483 ", ctx: %u, vm: %u"
484 ", alloc_type: %u, al->refcnt: %ld->%ld"
485 ", refcnt: %d->%d\n",
486 (long) req->tid, req->open.fd, ahnd,
487 req->open.ctx,
488 req->open.vm, req->open.allocator_type,
489 refcnt - 1, refcnt, ret - 1, ret);
490 break;
491
492 case REQ_OPEN_AS:
493 /* lock first to avoid concurrent close */
494 pthread_mutex_lock(&map_mutex);
495
496 al = __allocator_find_by_handle(req->allocator_handle);
497 resp->response_type = RESP_OPEN_AS;
498
499 if (!al) {
500 alloc_info("<open as> [tid: %ld] ahnd: %" PRIx64
501 " -> no handle\n",
502 (long) req->tid, req->allocator_handle);
503 pthread_mutex_unlock(&map_mutex);
504 break;
505 }
506
507 if (!al->vm) {
508 alloc_info("<open as> [tid: %ld] ahnd: %" PRIx64
509 " -> only open as for <fd, vm> is possible\n",
510 (long) req->tid, req->allocator_handle);
511 pthread_mutex_unlock(&map_mutex);
512 break;
513 }
514
515
516 al = allocator_open_as(al, req->open_as.new_vm, &ahnd);
517 refcnt = atomic_load(&al->refcount);
518 ret = atomic_load(&al->ial->refcount);
519 pthread_mutex_unlock(&map_mutex);
520
521 resp->response_type = RESP_OPEN_AS;
522 resp->open.allocator_handle = ahnd;
523
524 alloc_info("<open as> [tid: %ld] fd: %d, ahnd: %" PRIx64
525 ", ctx: %u, vm: %u"
526 ", alloc_type: %u, al->refcnt: %ld->%ld"
527 ", refcnt: %d->%d\n",
528 (long) req->tid, al->fd, ahnd,
529 al->ctx, al->vm, al->ial->type,
530 refcnt - 1, refcnt, ret - 1, ret);
531 break;
532
533 case REQ_CLOSE:
534 pthread_mutex_lock(&map_mutex);
535 al = __allocator_find_by_handle(req->allocator_handle);
536 resp->response_type = RESP_CLOSE;
537
538 if (!al) {
539 alloc_info("<close> [tid: %ld] ahnd: %" PRIx64
540 " -> no handle\n",
541 (long) req->tid, req->allocator_handle);
542 pthread_mutex_unlock(&map_mutex);
543 break;
544 }
545
546 resp->response_type = RESP_CLOSE;
547 ctx = al->ctx;
548 vm = al->vm;
549
550 refcnt = atomic_load(&al->refcount);
551 ret = atomic_load(&al->ial->refcount);
552 resp->close.is_empty = allocator_close(req->allocator_handle);
553 pthread_mutex_unlock(&map_mutex);
554
555 alloc_info("<close> [tid: %ld] ahnd: %" PRIx64
556 ", ctx: %u, vm: %u"
557 ", is_empty: %d, al->refcount: %ld->%ld"
558 ", refcnt: %d->%d\n",
559 (long) req->tid, req->allocator_handle,
560 ctx, vm, resp->close.is_empty,
561 refcnt, refcnt - 1, ret, ret - 1);
562
563 break;
564
565 case REQ_ADDRESS_RANGE:
566 resp->response_type = RESP_ADDRESS_RANGE;
567 ial->get_address_range(ial, &start, &end);
568 resp->address_range.start = start;
569 resp->address_range.end = end;
570 alloc_info("<address range> [tid: %ld] ahnd: %" PRIx64
571 ", ctx: %u, vm: %u"
572 ", start: 0x%" PRIx64 ", end: 0x%" PRId64 "\n",
573 (long) req->tid, req->allocator_handle,
574 al->ctx, al->vm, start, end);
575 break;
576
577 case REQ_ALLOC:
578 resp->response_type = RESP_ALLOC;
579 resp->alloc.offset = ial->alloc(ial,
580 req->alloc.handle,
581 req->alloc.size,
582 req->alloc.alignment,
583 req->alloc.strategy);
584 alloc_info("<alloc> [tid: %ld] ahnd: %" PRIx64
585 ", ctx: %u, vm: %u, handle: %u"
586 ", size: 0x%" PRIx64 ", offset: 0x%" PRIx64
587 ", alignment: 0x%" PRIx64 ", strategy: %u\n",
588 (long) req->tid, req->allocator_handle,
589 al->ctx, al->vm,
590 req->alloc.handle, req->alloc.size,
591 resp->alloc.offset, req->alloc.alignment,
592 req->alloc.strategy);
593 break;
594
595 case REQ_FREE:
596 resp->response_type = RESP_FREE;
597 resp->free.freed = ial->free(ial, req->free.handle);
598 alloc_info("<free> [tid: %ld] ahnd: %" PRIx64
599 ", ctx: %u, vm: %u"
600 ", handle: %u, freed: %d\n",
601 (long) req->tid, req->allocator_handle,
602 al->ctx, al->vm,
603 req->free.handle, resp->free.freed);
604 break;
605
606 case REQ_IS_ALLOCATED:
607 resp->response_type = RESP_IS_ALLOCATED;
608 allocated = ial->is_allocated(ial,
609 req->is_allocated.handle,
610 req->is_allocated.size,
611 req->is_allocated.offset);
612 resp->is_allocated.allocated = allocated;
613 alloc_info("<is allocated> [tid: %ld] ahnd: %" PRIx64
614 ", ctx: %u, vm: %u"
615 ", offset: 0x%" PRIx64
616 ", allocated: %d\n", (long) req->tid,
617 req->allocator_handle, al->ctx, al->vm,
618 req->is_allocated.offset, allocated);
619 break;
620
621 case REQ_RESERVE:
622 resp->response_type = RESP_RESERVE;
623 reserved = ial->reserve(ial,
624 req->reserve.handle,
625 req->reserve.start,
626 req->reserve.end);
627 resp->reserve.reserved = reserved;
628 alloc_info("<reserve> [tid: %ld] ahnd: %" PRIx64
629 ", ctx: %u, vm: %u, handle: %u"
630 ", start: 0x%" PRIx64 ", end: 0x%" PRIx64
631 ", reserved: %d\n",
632 (long) req->tid, req->allocator_handle,
633 al->ctx, al->vm, req->reserve.handle,
634 req->reserve.start, req->reserve.end, reserved);
635 break;
636
637 case REQ_UNRESERVE:
638 resp->response_type = RESP_UNRESERVE;
639 unreserved = ial->unreserve(ial,
640 req->unreserve.handle,
641 req->unreserve.start,
642 req->unreserve.end);
643 resp->unreserve.unreserved = unreserved;
644 alloc_info("<unreserve> [tid: %ld] ahnd: %" PRIx64
645 ", ctx: %u, vm: %u, handle: %u"
646 ", start: 0x%" PRIx64 ", end: 0x%" PRIx64
647 ", unreserved: %d\n",
648 (long) req->tid, req->allocator_handle,
649 al->ctx, al->vm, req->unreserve.handle,
650 req->unreserve.start, req->unreserve.end,
651 unreserved);
652 break;
653
654 case REQ_IS_RESERVED:
655 resp->response_type = RESP_IS_RESERVED;
656 reserved = ial->is_reserved(ial,
657 req->is_reserved.start,
658 req->is_reserved.end);
659 resp->is_reserved.reserved = reserved;
660 alloc_info("<is reserved> [tid: %ld] ahnd: %" PRIx64
661 ", ctx: %u, vm: %u"
662 ", start: 0x%" PRIx64 ", end: 0x%" PRIx64
663 ", reserved: %d\n",
664 (long) req->tid, req->allocator_handle,
665 al->ctx, al->vm, req->is_reserved.start,
666 req->is_reserved.end, reserved);
667 break;
668
669 case REQ_RESERVE_IF_NOT_ALLOCATED:
670 resp->response_type = RESP_RESERVE_IF_NOT_ALLOCATED;
671 size = req->reserve.end - req->reserve.start;
672
673 allocated = ial->is_allocated(ial, req->reserve.handle,
674 size, req->reserve.start);
675 if (allocated) {
676 resp->reserve_if_not_allocated.allocated = allocated;
677 alloc_info("<reserve if not allocated> [tid: %ld] "
678 "ahnd: %" PRIx64 ", ctx: %u, vm: %u"
679 ", handle: %u, size: 0x%lx"
680 ", start: 0x%" PRIx64 ", end: 0x%" PRIx64
681 ", allocated: %d, reserved: %d\n",
682 (long) req->tid, req->allocator_handle,
683 al->ctx, al->vm, req->reserve.handle,
684 (long) size, req->reserve.start,
685 req->reserve.end, allocated, false);
686 break;
687 }
688
689 reserved = ial->reserve(ial,
690 req->reserve.handle,
691 req->reserve.start,
692 req->reserve.end);
693 resp->reserve_if_not_allocated.reserved = reserved;
694 alloc_info("<reserve if not allocated> [tid: %ld] "
695 "ahnd: %" PRIx64 ", ctx: %u, vm: %u"
696 ", handle: %u, start: 0x%" PRIx64 ", end: 0x%" PRIx64
697 ", allocated: %d, reserved: %d\n",
698 (long) req->tid, req->allocator_handle,
699 al->ctx, al->vm,
700 req->reserve.handle,
701 req->reserve.start, req->reserve.end,
702 false, reserved);
703 break;
704 }
705
706 if (req->request_type > REQ_CLOSE)
707 pthread_mutex_unlock(&ial->mutex);
708
709 return 0;
710 }
711
712 ret = send_req_recv_resp(channel, req, resp);
713
714 if (ret < 0)
715 exit(0);
716
717 return ret;
718}
719
720static void *allocator_thread_loop(void *data)
721{
722 struct alloc_req req;
723 struct alloc_resp resp;
724 int ret;
725 (void) data;
726
727 alloc_info("Allocator pid: %ld, tid: %ld\n",
728 (long) allocator_pid, (long) gettid());
729 alloc_info("Entering allocator loop\n");
730
731 WRITE_ONCE(allocator_thread_running, true);
732
733 while (1) {
734 ret = recv_req(channel, &req);
735
736 if (ret == -1) {
737 igt_warn("Error receiving request in thread, ret = %d [%s]\n",
738 ret, strerror(errno));
739 igt_waitchildren_timeout(1, "Stopping children, error receiving request\n");
740 return (void *) -1;
741 }
742
743 /* Fake message to stop the thread */
744 if (req.request_type == REQ_STOP) {
745 alloc_info("<stop request>\n");
746 break;
747 }
748
749 ret = handle_request(&req, &resp);
750 if (ret) {
751 igt_warn("Error handling request in thread, ret = %d [%s]\n",
752 ret, strerror(errno));
753 break;
754 }
755
756 ret = send_resp(channel, req.tid, &resp);
757 if (ret) {
758 igt_warn("Error sending response in thread, ret = %d [%s]\n",
759 ret, strerror(errno));
760
761 igt_waitchildren_timeout(1, "Stopping children, error sending response\n");
762 return (void *) -1;
763 }
764 }
765
766 WRITE_ONCE(allocator_thread_running, false);
767
768 return NULL;
769}
770
771
772/**
773 * __intel_allocator_multiprocess_prepare:
774 *
775 * Prepares allocator infrastructure to work in multiprocess mode.
776 *
777 * Some explanation of why the prepare/start steps are separated is needed.
778 * When the address sanitizer is not used, a simple
779 * intel_allocator_multiprocess_start() call is enough. With the address
780 * sanitizer and forking we can hit a situation where one forked child has
781 * called the allocator's alloc() (so the parent has poisoned memory in its
782 * shadow map) and then a second fork occurs. The second child inherits the
783 * poisoned shadow map from the parent (where the allocator thread resides).
784 * Checking the shadow map in this child will then report a memory leak.
785 *
786 * For how to separate the initialization steps, take a look at the
787 * fork_simple_stress() function in api_intel_allocator.c.
788 */
789void __intel_allocator_multiprocess_prepare(void)
790{
791 intel_allocator_init();
792
793 multiprocess = true;
794 channel->init(channel);
795}
796
797#define START_TIMEOUT_MS 100
798void __intel_allocator_multiprocess_start(void)
799{
800 int time_left = START_TIMEOUT_MS;
801
802 pthread_create(&allocator_thread, NULL,
803 allocator_thread_loop, NULL);
804
805 /* Wait until the allocator thread has started */
806 while (time_left-- > 0 && !READ_ONCE(allocator_thread_running))
807 usleep(1000);
808}
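
/*
 * A hedged usage sketch (kept out of the build) of the prepare/start split
 * described above: prepare before forking, start the allocator thread
 * afterwards, mirroring api_intel_allocator.c:fork_simple_stress().
 * igt_fork()/igt_waitchildren() are the usual IGT helpers; the child count,
 * the 4 KiB size and the bare handle value 1 (standing in for a real GEM
 * handle) are arbitrary illustration values.
 */
#if 0
static void example_prepare_then_start(int fd)
{
	__intel_allocator_multiprocess_prepare();

	igt_fork(child, 4) {
		/* Requests queue up on the channel until the thread runs. */
		uint64_t ahnd = intel_allocator_open(fd, 0,
						     INTEL_ALLOCATOR_SIMPLE);

		intel_allocator_alloc(ahnd, 1, 4096, 0);
		intel_allocator_free(ahnd, 1);
		intel_allocator_close(ahnd);
	}

	__intel_allocator_multiprocess_start();
	igt_waitchildren();
	intel_allocator_multiprocess_stop();
}
#endif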
809
810/**
811 * intel_allocator_multiprocess_start:
812 *
813 * Function turns on intel_allocator multiprocess mode, which means that
814 * all allocations from child processes are performed in a separate thread
815 * within the main IGT process. Children are aware of the situation and use
816 * an interprocess communication channel to send/receive messages
817 * (open, close, alloc, free, ...) to/from the allocator thread.
818 *
819 * Must be used when you want to use an allocator in non-single-process code.
820 * All allocations in threads spawned in the main IGT process are handled by
821 * mutexing, not by sending/receiving messages to/from the allocator thread.
822 *
823 * Note. This destroys all previously created allocators and their content.
824 */
825void intel_allocator_multiprocess_start(void)
826{
827 alloc_info("allocator multiprocess start\n");
828
829 igt_assert_f(child_pid == -1,
830 "Allocator thread can be spawned only in main IGT process\n");
831 __intel_allocator_multiprocess_prepare();
832 __intel_allocator_multiprocess_start();
833}
834
835/**
836 * intel_allocator_multiprocess_stop:
837 *
838 * Function turns off intel_allocator multiprocess mode, which means
839 * stopping the allocator thread and deinitializing its data.
840 */
841#define STOP_TIMEOUT_MS 100
842void intel_allocator_multiprocess_stop(void)
843{
844 int time_left = STOP_TIMEOUT_MS;
845
846 alloc_info("allocator multiprocess stop\n");
847
848 if (multiprocess) {
849 send_alloc_stop(channel);
850
851 /* Give the allocator thread time to complete */
852 while (time_left-- > 0 && READ_ONCE(allocator_thread_running))
853 usleep(1000); /* coarse calculation */
854
855 /* Deinit, this should stop all blocked syscalls, if any */
856 channel->deinit(channel);
857 pthread_join(allocator_thread, NULL);
858
859 /* But we're not sure whether a child might still be stuck */
860 igt_waitchildren_timeout(5, "Stopping children");
861 multiprocess = false;
862 }
863}
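
/*
 * A hedged usage sketch (kept out of the build) of the common multiprocess
 * flow: start the allocator thread in the main IGT process, let forked
 * children talk to it over the message channel, then stop it.
 * igt_fork()/igt_waitchildren() and gem_create()/gem_close() are the usual
 * IGT helpers assumed here; the child count and object size are arbitrary.
 */
#if 0
static void example_multiprocess(int fd)
{
	intel_allocator_multiprocess_start();

	igt_fork(child, 2) {
		uint64_t ahnd = intel_allocator_open(fd, 0,
						     INTEL_ALLOCATOR_SIMPLE);
		uint32_t handle = gem_create(fd, 4096);
		uint64_t offset = intel_allocator_alloc(ahnd, handle,
							4096, 0);

		igt_assert(offset != ALLOC_INVALID_ADDRESS);
		intel_allocator_free(ahnd, handle);
		gem_close(fd, handle);
		intel_allocator_close(ahnd);
	}

	igt_waitchildren();
	intel_allocator_multiprocess_stop();
}
#endif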
864
865static uint64_t __intel_allocator_open_full(int fd, uint32_t ctx,
866 uint32_t vm,
867 uint64_t start, uint64_t end,
868 uint8_t allocator_type,
869 enum allocator_strategy strategy)
870{
871 struct alloc_req req = { .request_type = REQ_OPEN,
872 .open.fd = fd,
873 .open.ctx = ctx,
874 .open.vm = vm,
875 .open.start = start,
876 .open.end = end,
877 .open.allocator_type = allocator_type,
878 .open.allocator_strategy = strategy };
879 struct alloc_resp resp;
880
881 /* Get child_tid only once at open() */
882 if (child_tid == -1)
883 child_tid = gettid();
884
885 igt_assert(handle_request(&req, &resp) == 0);
886 igt_assert(resp.open.allocator_handle);
887 igt_assert(resp.response_type == RESP_OPEN);
888
889 return resp.open.allocator_handle;
890}
891
892/**
893 * intel_allocator_open_full:
894 * @fd: i915 descriptor
895 * @ctx: context
896 * @start: starting address of the allocator's range
897 * @end: ending address (exclusive) of the allocator's range
898 * @allocator_type: one of INTEL_ALLOCATOR_* define
899 * @strategy: passed to the allocator to define the strategy (like order
900 * of allocation, see notes below).
901 *
902 * Function opens an allocator instance within <@start, @end) vm for given
903 * @fd and @ctx and returns its handle. If the allocator for such pair
904 * doesn't exist it is created with refcount = 1.
905 * Parallel opens return the same handle, bumping its refcount.
906 *
907 * Returns: unique handle to the currently opened allocator.
908 *
909 * Notes:
910 * Strategy is generally used internally by the underlying allocator:
911 *
912 * For SIMPLE allocator:
913 * - ALLOC_STRATEGY_HIGH_TO_LOW means topmost addresses are allocated first,
914 * - ALLOC_STRATEGY_LOW_TO_HIGH opposite, allocation starts from lowest
915 * addresses.
916 *
917 * For RANDOM allocator:
918 * - no strategy is currently implemented.
919 */
920uint64_t intel_allocator_open_full(int fd, uint32_t ctx,
921 uint64_t start, uint64_t end,
922 uint8_t allocator_type,
923 enum allocator_strategy strategy)
924{
925 return __intel_allocator_open_full(fd, ctx, 0, start, end,
926 allocator_type, strategy);
927}
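
/*
 * A minimal, hedged usage sketch (kept out of the build): constrain the
 * allocator to a sub-range of the address space and allocate bottom-up.
 * The 64 KiB base and 4 GiB limit are arbitrary illustration values, not
 * requirements of the API.
 */
#if 0
static uint64_t example_open_limited(int fd, uint32_t ctx)
{
	return intel_allocator_open_full(fd, ctx, 0x10000, 1ull << 32,
					 INTEL_ALLOCATOR_SIMPLE,
					 ALLOC_STRATEGY_LOW_TO_HIGH);
}
#endif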
928
929uint64_t intel_allocator_open_vm_full(int fd, uint32_t vm,
930 uint64_t start, uint64_t end,
931 uint8_t allocator_type,
932 enum allocator_strategy strategy)
933{
934 igt_assert(vm != 0);
935 return __intel_allocator_open_full(fd, 0, vm, start, end,
936 allocator_type, strategy);
937}
938
939/**
940 * intel_allocator_open:
941 * @fd: i915 descriptor
942 * @ctx: context
943 * @allocator_type: one of INTEL_ALLOCATOR_* define
944 *
945 * Function opens an allocator instance for given @fd and @ctx and returns
946 * its handle. If the allocator for such pair doesn't exist it is created
947 * with refcount = 1. Parallel opens return the same handle, bumping its refcount.
948 *
949 * Returns: unique handle to the currently opened allocator.
950 *
951 * Notes: we pass ALLOC_STRATEGY_HIGH_TO_LOW as the default; playing with higher
952 * addresses makes it easier to find addressing issues (like passing non-canonical
953 * offsets, which won't be caught unless bit 47 is set).
954 */
955uint64_t intel_allocator_open(int fd, uint32_t ctx, uint8_t allocator_type)
956{
957 return intel_allocator_open_full(fd, ctx, 0, 0, allocator_type,
958 ALLOC_STRATEGY_HIGH_TO_LOW);
959}
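
/*
 * A hedged usage sketch (kept out of the build): single-process use of the
 * allocator for <fd, ctx 0>.  gem_create()/gem_close() are assumed to be the
 * usual IGT wrappers; the 4 KiB object size is arbitrary.
 */
#if 0
static void example_simple_alloc(int fd)
{
	uint64_t ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
	uint32_t handle = gem_create(fd, 4096);
	uint64_t offset = intel_allocator_alloc(ahnd, handle, 4096, 0);

	/* Allocating the same handle again returns the same offset. */
	igt_assert(offset == intel_allocator_alloc(ahnd, handle, 4096, 0));

	intel_allocator_free(ahnd, handle);
	gem_close(fd, handle);
	intel_allocator_close(ahnd);
}
#endif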
960
961uint64_t intel_allocator_open_vm(int fd, uint32_t vm, uint8_t allocator_type)
962{
963 return intel_allocator_open_vm_full(fd, vm, 0, 0, allocator_type,
964 ALLOC_STRATEGY_HIGH_TO_LOW);
965}
966
967uint64_t intel_allocator_open_vm_as(uint64_t allocator_handle, uint32_t new_vm)
968{
969 struct alloc_req req = { .request_type = REQ_OPEN_AS,
970 .allocator_handle = allocator_handle,
971 .open_as.new_vm = new_vm };
972 struct alloc_resp resp;
973
974 /* Get child_tid only once at open() */
975 if (child_tid == -1)
976 child_tid = gettid();
977
978 igt_assert(handle_request(&req, &resp) == 0);
979 igt_assert(resp.open_as.allocator_handle);
980 igt_assert(resp.response_type == RESP_OPEN_AS);
981
982 return resp.open.allocator_handle;
983}
984
985/**
986 * intel_allocator_close:
987 * @allocator_handle: handle to the allocator that will be closed
988 *
989 * Function decreases the allocator refcount for the given @handle.
990 * When the refcount reaches zero the allocator is closed (destroyed) and all
991 * allocated / reserved areas are freed.
992 *
993 * Returns: true if closed allocator was empty, false otherwise.
994 */
995bool intel_allocator_close(uint64_t allocator_handle)
996{
997 struct alloc_req req = { .request_type = REQ_CLOSE,
998 .allocator_handle = allocator_handle };
999 struct alloc_resp resp;
1000
1001 igt_assert(handle_request(&req, &resp) == 0);
1002 igt_assert(resp.response_type == RESP_CLOSE);
1003
1004 return resp.close.is_empty;
1005}
1006
1007/**
1008 * intel_allocator_get_address_range:
1009 * @allocator_handle: handle to an allocator
1010 * @startp: pointer to the variable where function writes starting offset
1011 * @endp: pointer to the variable where function writes ending offset
1012 *
1013 * Function fills @startp and @endp with, respectively, the starting and ending
1014 * offset of the allocator's working virtual address space range.
1015 *
1016 * Note. Allocators' working ranges can differ depending on the device or
1017 * the allocator type, so before reserving a specific offset it is good
1018 * practice to ensure that the address lies within the accepted range.
1019 */
1020void intel_allocator_get_address_range(uint64_t allocator_handle,
1021 uint64_t *startp, uint64_t *endp)
1022{
1023 struct alloc_req req = { .request_type = REQ_ADDRESS_RANGE,
1024 .allocator_handle = allocator_handle };
1025 struct alloc_resp resp;
1026
1027 igt_assert(handle_request(&req, &resp) == 0);
1028 igt_assert(resp.response_type == RESP_ADDRESS_RANGE);
1029
1030 if (startp)
1031 *startp = resp.address_range.start;
1032
1033 if (endp)
1034 *endp = resp.address_range.end;
1035}
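
/*
 * A hedged sketch (kept out of the build) of the note above: query the
 * allocator's working range before reserving a fixed offset inside it.
 * Reserving 1 MiB just below the end of the range is an arbitrary choice
 * for illustration.
 */
#if 0
static void example_reserve_in_range(uint64_t ahnd)
{
	uint64_t start, end, offset, size = 1ull << 20;

	intel_allocator_get_address_range(ahnd, &start, &end);
	offset = end - size;
	igt_assert(offset >= start);

	igt_assert(intel_allocator_reserve(ahnd, -1, size, offset));
	/* ... use the reserved area ... */
	igt_assert(intel_allocator_unreserve(ahnd, -1, size, offset));
}
#endif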
1036
1037/**
1038 * __intel_allocator_alloc:
1039 * @allocator_handle: handle to an allocator
1040 * @handle: handle to an object
1041 * @size: size of an object
1042 * @alignment: determines object alignment
1043 *
1044 * Function finds and returns the most suitable offset with given @alignment
1045 * for an object with @size identified by the @handle.
1046 *
1047 * Returns: the currently assigned address for a given object. If the object
1048 * was already allocated it returns the same address. If the allocator can't
1049 * find a suitable range it returns ALLOC_INVALID_ADDRESS.
1050 */
1051uint64_t __intel_allocator_alloc(uint64_t allocator_handle, uint32_t handle,
1052 uint64_t size, uint64_t alignment,
1053 enum allocator_strategy strategy)
1054{
1055 struct alloc_req req = { .request_type = REQ_ALLOC,
1056 .allocator_handle = allocator_handle,
1057 .alloc.handle = handle,
1058 .alloc.size = size,
1059 .alloc.alignment = alignment,
1060 .alloc.strategy = strategy };
1061 struct alloc_resp resp;
1062
1063 igt_assert(handle_request(&req, &resp) == 0);
1064 igt_assert(resp.response_type == RESP_ALLOC);
1065
1066 return resp.alloc.offset;
1067}
1068
1069/**
1070 * intel_allocator_alloc:
1071 * @allocator_handle: handle to an allocator
1072 * @handle: handle to an object
1073 * @size: size of an object
1074 * @alignment: determines object alignment
1075 *
1076 * Same as __intel_allocator_alloc() but asserts if the allocator can't return
1077 * a valid address. Uses the default allocation strategy chosen when the
1078 * allocator was opened.
1079 */
1080uint64_t intel_allocator_alloc(uint64_t allocator_handle, uint32_t handle,
1081 uint64_t size, uint64_t alignment)
1082{
1083 uint64_t offset;
1084
1085 offset = __intel_allocator_alloc(allocator_handle, handle,
1086 size, alignment,
1087 ALLOC_STRATEGY_NONE);
1088 igt_assert(offset != ALLOC_INVALID_ADDRESS);
1089
1090 return offset;
1091}
1092
1093/**
1094 * intel_allocator_alloc_with_strategy:
1095 * @allocator_handle: handle to an allocator
1096 * @handle: handle to an object
1097 * @size: size of an object
1098 * @alignment: determines object alignment
1099 * @strategy: strategy of allocation
1100 *
1101 * Same as __intel_allocator_alloc() but asserts if the allocator can't return
1102 * a valid address. Uses @strategy instead of the default chosen when the
1103 * allocator was opened.
1104 */
1105uint64_t intel_allocator_alloc_with_strategy(uint64_t allocator_handle,
1106 uint32_t handle,
1107 uint64_t size, uint64_t alignment,
1108 enum allocator_strategy strategy)
1109{
1110 uint64_t offset;
1111
1112 offset = __intel_allocator_alloc(allocator_handle, handle,
1113 size, alignment, strategy);
1114 igt_assert(offset != ALLOC_INVALID_ADDRESS);
1115
1116 return offset;
1117}
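
/*
 * A hedged sketch (kept out of the build): override the allocator's default
 * strategy for a single allocation, e.g. force a low address for one object
 * while the allocator was opened HIGH_TO_LOW.  @ahnd and @handle are assumed
 * to come from intel_allocator_open() and gem_create() respectively; the
 * 4 KiB size is arbitrary.
 */
#if 0
static void example_alloc_low(uint64_t ahnd, uint32_t handle)
{
	uint64_t offset;

	offset = intel_allocator_alloc_with_strategy(ahnd, handle, 4096, 0,
						     ALLOC_STRATEGY_LOW_TO_HIGH);
	igt_assert(intel_allocator_is_allocated(ahnd, handle, 4096, offset));
	intel_allocator_free(ahnd, handle);
}
#endif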
1118
1119
1120/**
1121 * intel_allocator_free:
1122 * @allocator_handle: handle to an allocator
1123 * @handle: handle to an object to be freed
1124 *
1125 * Function frees the object identified by @handle in the allocator, which
1126 * makes its offset allocable again.
1127 *
1128 * Note. Reserved areas can only be freed by the #intel_allocator_unreserve
1129 * function.
1130 *
1131 * Returns: true if the object was successfully freed, otherwise false.
1132 */
1133bool intel_allocator_free(uint64_t allocator_handle, uint32_t handle)
1134{
1135 struct alloc_req req = { .request_type = REQ_FREE,
1136 .allocator_handle = allocator_handle,
1137 .free.handle = handle };
1138 struct alloc_resp resp;
1139
1140 igt_assert(handle_request(&req, &resp) == 0);
1141 igt_assert(resp.response_type == RESP_FREE);
1142
1143 return resp.free.freed;
1144}
1145
1146/**
1147 * intel_allocator_is_allocated:
1148 * @allocator_handle: handle to an allocator
1149 * @handle: handle to an object
1150 * @size: size of an object
1151 * @offset: address of an object
1152 *
1153 * Function checks whether the object identified by the @handle and @size
1154 * is allocated at the @offset.
1155 *
1156 * Returns: true if the object is currently allocated at the @offset,
1157 * otherwise false.
1158 */
1159bool intel_allocator_is_allocated(uint64_t allocator_handle, uint32_t handle,
1160 uint64_t size, uint64_t offset)
1161{
1162 struct alloc_req req = { .request_type = REQ_IS_ALLOCATED,
1163 .allocator_handle = allocator_handle,
1164 .is_allocated.handle = handle,
1165 .is_allocated.size = size,
1166 .is_allocated.offset = offset };
1167 struct alloc_resp resp;
1168
1169 igt_assert(handle_request(&req, &resp) == 0);
1170 igt_assert(resp.response_type == RESP_IS_ALLOCATED);
1171
1172 return resp.is_allocated.allocated;
1173}
1174
1175/**
1176 * intel_allocator_reserve:
1177 * @allocator_handle: handle to an allocator
1178 * @handle: handle to an object
1179 * @size: size of an object
1180 * @offset: address of an object
1181 *
1182 * Function reserves the space that starts at @offset and is @size bytes long.
1183 * Optionally we can pass @handle to mark that space is for a specific
1184 * object, otherwise pass -1.
1185 *
1186 * Note. Reserved space is identified by offset and size, not a handle.
1187 * So an object can have multiple reserved spaces with its handle.
1188 *
1189 * Returns: true if space is successfully reserved, otherwise false.
1190 */
1191bool intel_allocator_reserve(uint64_t allocator_handle, uint32_t handle,
1192 uint64_t size, uint64_t offset)
1193{
1194 struct alloc_req req = { .request_type = REQ_RESERVE,
1195 .allocator_handle = allocator_handle,
1196 .reserve.handle = handle,
1197 .reserve.start = offset,
1198 .reserve.end = offset + size };
1199 struct alloc_resp resp;
1200
1201 igt_assert(handle_request(&req, &resp) == 0);
1202 igt_assert(resp.response_type == RESP_RESERVE);
1203
1204 return resp.reserve.reserved;
1205}
1206
1207/**
1208 * intel_allocator_unreserve:
1209 * @allocator_handle: handle to an allocator
1210 * @handle: handle to an object
1211 * @size: size of an object
1212 * @offset: address of an object
1213 *
1214 * Function unreserves the space identified by @offset, @size and @handle.
1215 *
1216 * Note. @handle, @size and @offset have to match those used in the reservation,
1217 * i.e. a call with the same offset but even a smaller size will fail.
1218 *
1219 * Returns: true if the space is successfully unreserved, otherwise false.
1220 */
1221bool intel_allocator_unreserve(uint64_t allocator_handle, uint32_t handle,
1222 uint64_t size, uint64_t offset)
1223{
1224 struct alloc_req req = { .request_type = REQ_UNRESERVE,
1225 .allocator_handle = allocator_handle,
1226 .unreserve.handle = handle,
1227 .unreserve.start = offset,
1228 .unreserve.end = offset + size };
1229 struct alloc_resp resp;
1230
1231 igt_assert(handle_request(&req, &resp) == 0);
1232 igt_assert(resp.response_type == RESP_UNRESERVE);
1233
1234 return resp.unreserve.unreserved;
1235}
1236
1237/**
1238 * intel_allocator_is_reserved:
1239 * @allocator_handle: handle to an allocator
1240 * @size: size of an object
1241 * @offset: address of an object
1242 *
1243 * Function checks whether the space starting at @offset with @size is
1244 * currently under reservation.
1245 *
1246 * Note. @size and @offset have to match those used in the reservation,
1247 * i.e. a check with the same offset but even a smaller size will fail.
1248 *
1249 * Returns: true if the space is reserved, otherwise false.
1250 */
1251bool intel_allocator_is_reserved(uint64_t allocator_handle,
1252 uint64_t size, uint64_t offset)
1253{
1254 struct alloc_req req = { .request_type = REQ_IS_RESERVED,
1255 .allocator_handle = allocator_handle,
1256 .is_reserved.start = offset,
1257 .is_reserved.end = offset + size };
1258 struct alloc_resp resp;
1259
1260 igt_assert(handle_request(&req, &resp) == 0);
1261 igt_assert(resp.response_type == RESP_IS_RESERVED);
1262
1263 return resp.is_reserved.reserved;
1264}
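
/*
 * A hedged sketch (kept out of the build) of the matching rule above:
 * reservations are tracked by exact <offset, size>, so probing with a
 * smaller size fails even though it lies inside the reserved area.  The
 * offset/size values are hypothetical and must fall within the allocator's
 * working range.
 */
#if 0
static void example_reservation_matching(uint64_t ahnd)
{
	const uint64_t offset = 0x100000, size = 0x10000;

	igt_assert(intel_allocator_reserve(ahnd, -1, size, offset));
	igt_assert(intel_allocator_is_reserved(ahnd, size, offset));
	igt_assert(!intel_allocator_is_reserved(ahnd, size / 2, offset));
	igt_assert(intel_allocator_unreserve(ahnd, -1, size, offset));
}
#endif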
1265
1266/**
1267 * intel_allocator_reserve_if_not_allocated:
1268 * @allocator_handle: handle to an allocator
1269 * @handle: handle to an object
1270 * @size: size of an object
1271 * @offset: address of an object
1272 * @is_allocatedp: if not NULL function writes there object allocation status
1273 * (true/false)
1274 *
1275 * Function checks whether the object identified by the @handle and @size
1276 * is allocated at the @offset and writes the result to @is_allocatedp.
1277 * If it is not allocated there, the space is reserved at the given @offset.
1278 *
1279 * Returns: true if the space for an object was reserved, otherwise false.
1280 */
1281bool intel_allocator_reserve_if_not_allocated(uint64_t allocator_handle,
1282 uint32_t handle,
1283 uint64_t size, uint64_t offset,
1284 bool *is_allocatedp)
1285{
1286 struct alloc_req req = { .request_type = REQ_RESERVE_IF_NOT_ALLOCATED,
1287 .allocator_handle = allocator_handle,
1288 .reserve.handle = handle,
1289 .reserve.start = offset,
1290 .reserve.end = offset + size };
1291 struct alloc_resp resp;
1292
1293 igt_assert(handle_request(&req, &resp) == 0);
1294 igt_assert(resp.response_type == RESP_RESERVE_IF_NOT_ALLOCATED);
1295
1296 if (is_allocatedp)
1297 *is_allocatedp = resp.reserve_if_not_allocated.allocated;
1298
1299 return resp.reserve_if_not_allocated.reserved;
1300}
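
/*
 * A hedged sketch (kept out of the build): reserve an offset for later use
 * unless the object already sits there.  The 0x200000 offset and 4 KiB size
 * are hypothetical illustration values.
 */
#if 0
static void example_reserve_if_free(uint64_t ahnd, uint32_t handle)
{
	bool allocated;

	if (intel_allocator_reserve_if_not_allocated(ahnd, handle, 4096,
						     0x200000, &allocated)) {
		/* ... use the reserved area, then release it ... */
		intel_allocator_unreserve(ahnd, handle, 4096, 0x200000);
	} else {
		igt_info("not reserved, already allocated there: %d\n",
			 allocated);
	}
}
#endif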
1301
1302/**
1303 * intel_allocator_print:
1304 * @allocator_handle: handle to an allocator
1305 *
1306 * Function prints statistics and content of the allocator.
1307 * Mainly for debugging purposes.
1308 *
1309 * Note. Printing possible only in the main process.
1310 **/
1311void intel_allocator_print(uint64_t allocator_handle)
1312{
1313 igt_assert(allocator_handle);
1314
1315 if (!multiprocess || is_same_process()) {
1316 struct allocator *al;
1317
1318 al = __allocator_find_by_handle(allocator_handle);
1319 pthread_mutex_lock(&map_mutex);
1320 al->ial->print(al->ial, true);
1321 pthread_mutex_unlock(&map_mutex);
1322 } else {
1323 igt_warn("Print stats is in main process only\n");
1324 }
1325}
1326
1327static int equal_handles(const void *key1, const void *key2)
1328{
1329 const struct handle_entry *h1 = key1, *h2 = key2;
1330
1331 alloc_debug("h1: %llx, h2: %llx\n",
1332 (long long) h1->handle, (long long) h2->handle);
1333
1334 return h1->handle == h2->handle;
1335}
1336
1337static int equal_ctx(const void *key1, const void *key2)
1338{
1339 const struct allocator *a1 = key1, *a2 = key2;
1340
1341 alloc_debug("a1: <fd: %d, ctx: %u>, a2 <fd: %d, ctx: %u>\n",
1342 a1->fd, a1->ctx, a2->fd, a2->ctx);
1343
1344 return a1->fd == a2->fd && a1->ctx == a2->ctx;
1345}
1346
1347static int equal_vm(const void *key1, const void *key2)
1348{
1349 const struct allocator *a1 = key1, *a2 = key2;
1350
1351 alloc_debug("a1: <fd: %d, vm: %u>, a2 <fd: %d, vm: %u>\n",
1352 a1->fd, a1->vm, a2->fd, a2->vm);
1353
1354 return a1->fd == a2->fd && a1->vm == a2->vm;
1355}
1356
1357/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
1358#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
1359
1360static inline uint32_t hash_handles(const void *val)
1361{
1362 uint32_t hash = ((struct handle_entry *) val)->handle;
1363
1364 hash = hash * GOLDEN_RATIO_PRIME_32;
1365 return hash;
1366}
1367
1368static inline uint32_t hash_instance(const void *val)
1369{
1370 uint64_t hash = ((struct allocator *) val)->fd;
1371
1372 hash = hash * GOLDEN_RATIO_PRIME_32;
1373 return hash;
1374}
1375
1376static void __free_maps(struct igt_map *map, bool close_allocators)
1377{
1378 struct igt_map_entry *pos;
1379 const struct handle_entry *h;
1380
1381 if (!map)
1382 return;
1383
1384 if (close_allocators)
1385 igt_map_foreach(map, pos) {
1386 h = pos->key;
1387 allocator_close(h->handle);
1388 }
1389
1390 igt_map_destroy(map, map_entry_free_func);
1391}
1392
1393/**
1394 * intel_allocator_init:
1395 *
1396 * Function initializes the allocator infrastructure. A second call will
1397 * override the current infrastructure and destroy any allocators existing
1398 * there. It is called from the igt_constructor.
1399 **/
1400void intel_allocator_init(void)
1401{
1402 alloc_info("Prepare an allocator infrastructure\n");
1403
1404 allocator_pid = getpid();
1405 alloc_info("Allocator pid: %ld\n", (long) allocator_pid);
1406
1407 __free_maps(handles, true);
1408 __free_maps(ctx_map, false);
1409 __free_maps(vm_map, false);
1410
1411 atomic_init(&next_handle, 1);
1412 handles = igt_map_create(hash_handles, equal_handles);
1413 ctx_map = igt_map_create(hash_instance, equal_ctx);
1414 vm_map = igt_map_create(hash_instance, equal_vm);
1415 igt_assert(handles && ctx_map && vm_map);
1416
1417 channel = intel_allocator_get_msgchannel(CHANNEL_SYSVIPC_MSGQUEUE);
1418}
1419
1420igt_constructor {
1421 intel_allocator_init();
1422}