i915_gem_execbuffer.c (forked from torvalds/linux; 3586 lines, 90.9 KB)
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2008,2010 Intel Corporation
*/
#include <linux/dma-resv.h>
#include <linux/highmem.h>
#include <linux/intel-iommu.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
#include <drm/drm_syncobj.h>
#include "display/intel_frontbuffer.h"
#include "gem/i915_gem_ioctls.h"
#include "gt/intel_context.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_ring.h"
#include "pxp/intel_pxp.h"
#include "i915_cmd_parser.h"
#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_evict.h"
#include "i915_gem_ioctls.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
struct eb_vma {
struct i915_vma *vma;
unsigned int flags;
/** This vma's place in the execbuf reservation list */
struct drm_i915_gem_exec_object2 *exec;
struct list_head bind_link;
struct list_head reloc_link;
struct hlist_node node;
u32 handle;
};
enum {
FORCE_CPU_RELOC = 1,
FORCE_GTT_RELOC,
FORCE_GPU_RELOC,
#define DBG_FORCE_RELOC 0 /* choose one of the above! */
};
/* __EXEC_OBJECT_NO_RESERVE is BIT(31), defined in i915_vma.h */
#define __EXEC_OBJECT_HAS_PIN BIT(30)
#define __EXEC_OBJECT_HAS_FENCE BIT(29)
#define __EXEC_OBJECT_USERPTR_INIT BIT(28)
#define __EXEC_OBJECT_NEEDS_MAP BIT(27)
#define __EXEC_OBJECT_NEEDS_BIAS BIT(26)
#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 26) /* all of the above + */
#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
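/*
 * Internal execbuf-wide state bits below, tracked in eb->args->flags
 * (cf. the per-object __EXEC_OBJECT_* bits above, which live in
 * eb_vma.flags).
 */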
#define __EXEC_HAS_RELOC BIT(31)
#define __EXEC_ENGINE_PINNED BIT(30)
#define __EXEC_USERPTR_USED BIT(29)
#define __EXEC_INTERNAL_FLAGS (~0u << 29)
#define UPDATE PIN_OFFSET_FIXED
#define BATCH_OFFSET_BIAS (256*1024)
#define __I915_EXEC_ILLEGAL_FLAGS \
(__I915_EXEC_UNKNOWN_FLAGS | \
I915_EXEC_CONSTANTS_MASK | \
I915_EXEC_RESOURCE_STREAMER)
/* Catch emission of unexpected errors for CI! */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#undef EINVAL
#define EINVAL ({ \
DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
22; \
})
#endif
/**
* DOC: User command execution
*
* Userspace submits commands to be executed on the GPU as an instruction
* stream within a GEM object we call a batchbuffer. These instructions may
* refer to other GEM objects containing auxiliary state such as kernels,
* samplers, render targets and even secondary batchbuffers. Userspace does
* not know where in the GPU memory these objects reside and so before the
* batchbuffer is passed to the GPU for execution, those addresses in the
* batchbuffer and auxiliary objects are updated. This is known as relocation,
* or patching. To try and avoid having to relocate each object on the next
* execution, userspace is told the location of those objects in this pass,
* but this remains just a hint as the kernel may choose a new location for
* any object in the future.
*
* At the level of talking to the hardware, submitting a batchbuffer for the
* GPU to execute is to add content to a buffer from which the HW
* command streamer is reading.
*
* 1. Add a command to load the HW context. For Logical Ring Contexts, i.e.
* Execlists, this command is not placed on the same buffer as the
* remaining items.
*
* 2. Add a command to invalidate caches to the buffer.
*
* 3. Add a batchbuffer start command to the buffer; the start command is
* essentially a token together with the GPU address of the batchbuffer
* to be executed.
*
* 4. Add a pipeline flush to the buffer.
*
* 5. Add a memory write command to the buffer to record when the GPU
* is done executing the batchbuffer. The memory write writes the
* global sequence number of the request, ``i915_request::global_seqno``;
* the i915 driver uses the current value in the register to determine
* if the GPU has completed the batchbuffer.
*
* 6. Add a user interrupt command to the buffer. This command instructs
* the GPU to issue an interrupt when the command, pipeline flush and
* memory write are completed.
*
* 7. Inform the hardware of the additional commands added to the buffer
* (by updating the tail pointer).
*
* Processing an execbuf ioctl is conceptually split up into a few phases.
*
* 1. Validation - Ensure all the pointers, handles and flags are valid.
* 2. Reservation - Assign GPU address space for every object
* 3. Relocation - Update any addresses to point to the final locations
* 4. Serialisation - Order the request with respect to its dependencies
* 5. Construction - Construct a request to execute the batchbuffer
* 6. Submission (at some point in the future execution)
*
* Reserving resources for the execbuf is the most complicated phase. We
* neither want to have to migrate the object in the address space, nor do
* we want to have to update any relocations pointing to this object. Ideally,
* we want to leave the object where it is and for all the existing relocations
* to match. If the object is given a new address, or if userspace thinks the
* object is elsewhere, we have to parse all the relocation entries and update
* the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
* all the target addresses in all of its objects match the value in the
* relocation entries and that they all match the presumed offsets given by the
* list of execbuffer objects. Using this knowledge, we know that if we haven't
* moved any buffers, all the relocation entries are valid and we can skip
* the update. (If userspace is wrong, the likely outcome is an impromptu GPU
* hang.) The requirements for using I915_EXEC_NO_RELOC are:
*
* The addresses written in the objects must match the corresponding
* reloc.presumed_offset which in turn must match the corresponding
* execobject.offset.
*
* Any render targets written to in the batch must be flagged with
* EXEC_OBJECT_WRITE.
*
* To avoid stalling, execobject.offset should match the current
* address of that object within the active context.
*
* The reservation is done in multiple phases. First we try and keep any
* object already bound in its current location - as long as that meets the
* constraints imposed by the new execbuffer. Any object left unbound after the
* first pass is then fitted into any available idle space. If an object does
* not fit, all objects are removed from the reservation and the process rerun
* after sorting the objects into a priority order (more difficult to fit
* objects are tried first). Failing that, the entire VM is cleared and we try
* to fit the execbuf one last time before concluding that it simply will not
* fit.
*
* A small complication to all of this is that we allow userspace not only to
* specify an alignment and a size for the object in the address space, but
* we also allow userspace to specify the exact offset. These objects are
* simpler to place (the location is known a priori); all we have to do is make
* sure the space is available.
*
* Once all the objects are in place, patching up the buried pointers to point
* to the final locations is a fairly simple job of walking over the relocation
* entry arrays, looking up the right address and rewriting the value into
* the object. Simple! ... The relocation entries are stored in user memory
* and so to access them we have to copy them into a local buffer. That copy
* has to avoid taking any pagefaults as they may lead back to a GEM object
* requiring the struct_mutex (i.e. recursive deadlock). So once again we split
* the relocation into multiple passes. First we try to do everything within an
* atomic context (avoid the pagefaults) which requires that we never wait. If
* we detect that we may wait, or if we need to fault, then we have to fall back
* to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
* bells yet?) Dropping the mutex means that we lose all the state we have
* built up so far for the execbuf and we must reset any global data. However,
* we do leave the objects pinned in their final locations - which is a
* potential issue for concurrent execbufs. Once we have left the mutex, we can
* allocate and copy all the relocation entries into a large array at our
* leisure, reacquire the mutex, reclaim all the objects and other state and
* then proceed to update any incorrect addresses with the objects.
*
* As we process the relocation entries, we maintain a record of whether the
* object is being written to. Using NO_RELOC, we expect userspace to provide
* this information instead. We also check whether we can skip the relocation
* by comparing the expected value inside the relocation entry with the target's
* final address. If they differ, we have to map the current object and rewrite
* the 4 or 8 byte pointer within.
*
* Serialising an execbuf is quite simple according to the rules of the GEM
* ABI. Execution within each context is ordered by the order of submission.
* Writes to any GEM object are in order of submission and are exclusive. Reads
* from a GEM object are unordered with respect to other reads, but ordered by
* writes. A write submitted after a read cannot occur before the read, and
* similarly any read submitted after a write cannot occur before the write.
* Writes are ordered between engines such that only one write occurs at any
* time (completing any reads beforehand) - using semaphores where available
* and CPU serialisation otherwise. Other GEM accesses obey the same rules: any
* write (either via mmaps using set-domain, or via pwrite) must flush all GPU
* reads before starting, and any read (either using set-domain or pread) must
* flush all GPU writes before starting. (Note we only employ a barrier before,
* we currently rely on userspace not concurrently starting a new execution
* whilst reading or writing to an object. This may be an advantage or not
* depending on how much you trust userspace not to shoot themselves in the
* foot.) Serialisation may just result in the request being inserted into
* a DAG awaiting its turn, but the simplest approach is to wait on the CPU until
* all dependencies are resolved.
*
* After all of that, it is just a matter of closing the request and handing it to
* the hardware (well, leaving it in a queue to be executed). However, we also
* offer the ability for batchbuffers to be run with elevated privileges so
* that they access otherwise hidden registers. (Used to adjust L3 cache etc.)
* Before any batch is given extra privileges we must first check that it
* contains no nefarious instructions: we check that each instruction is from
* our whitelist and that all registers are also from an allowed list. We first
* copy the user's batchbuffer to a shadow (so that the user doesn't have
* access to it, either by the CPU or GPU as we scan it) and then parse each
* instruction. If everything is ok, we set a flag telling the hardware to run
* the batchbuffer in trusted mode, otherwise the ioctl is rejected.
*/
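/*
 * Illustrative userspace-side sketch (not part of the driver): a minimal
 * submission against the uapi described above, showing how execobject.offset,
 * EXEC_OBJECT_WRITE and I915_EXEC_NO_RELOC fit together. The handles,
 * offsets, context id and fd below are hypothetical placeholders:
 *
 *	struct drm_i915_gem_exec_object2 exec[2] = {};
 *	struct drm_i915_gem_execbuffer2 args = {};
 *
 *	exec[0].handle = target_handle;         // written to by the batch
 *	exec[0].offset = presumed_target_addr;  // must match the address in the batch
 *	exec[0].flags  = EXEC_OBJECT_WRITE;
 *	exec[1].handle = batch_handle;          // batch is last unless I915_EXEC_BATCH_FIRST
 *	exec[1].offset = presumed_batch_addr;
 *
 *	args.buffers_ptr  = (uintptr_t)exec;
 *	args.buffer_count = 2;
 *	args.batch_len    = batch_bytes;
 *	args.flags        = I915_EXEC_RENDER | I915_EXEC_NO_RELOC;
 *	args.rsvd1        = ctx_id;
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &args);
 *
 * On return, each exec[].offset holds the address actually used; feeding
 * those back as the presumed offsets keeps later NO_RELOC submissions valid.
 */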
struct eb_fence {
struct drm_syncobj *syncobj; /* Use with ptr_mask_bits() */
struct dma_fence *dma_fence;
u64 value;
struct dma_fence_chain *chain_fence;
};
struct i915_execbuffer {
struct drm_i915_private *i915; /** i915 backpointer */
struct drm_file *file; /** per-file lookup tables and limits */
struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
struct eb_vma *vma;
struct intel_gt *gt; /* gt for the execbuf */
struct intel_context *context; /* logical state for the request */
struct i915_gem_context *gem_context; /** caller's context */
/** our requests to build */
struct i915_request *requests[MAX_ENGINE_INSTANCE + 1];
/** identity of the batch obj/vma */
struct eb_vma *batches[MAX_ENGINE_INSTANCE + 1];
struct i915_vma *trampoline; /** trampoline used for chaining */
/** used for excl fence in dma_resv objects when > 1 BB submitted */
struct dma_fence *composite_fence;
/** actual size of execobj[] as we may extend it for the cmdparser */
unsigned int buffer_count;
/* number of batches in execbuf IOCTL */
unsigned int num_batches;
/** list of vma not yet bound during reservation phase */
struct list_head unbound;
/** list of vma that have execobj.relocation_count */
struct list_head relocs;
struct i915_gem_ww_ctx ww;
/**
* Track the most recently used object for relocations, as we
* frequently have to perform multiple relocations within the same
* obj/page
*/
struct reloc_cache {
struct drm_mm_node node; /** temporary GTT binding */
unsigned long vaddr; /** Current kmap address */
unsigned long page; /** Currently mapped page index */
unsigned int graphics_ver; /** Cached value of GRAPHICS_VER */
bool use_64bit_reloc : 1;
bool has_llc : 1;
bool has_fence : 1;
bool needs_unfenced : 1;
} reloc_cache;
u64 invalid_flags; /** Set of execobj.flags that are invalid */
/** Length of batch within object */
u64 batch_len[MAX_ENGINE_INSTANCE + 1];
u32 batch_start_offset; /** Location within object of batch */
u32 batch_flags; /** Flags composed for emit_bb_start() */
struct intel_gt_buffer_pool_node *batch_pool; /** pool node for batch buffer */
/**
* Indicate either the size of the hashtable used to resolve
* relocation handles, or if negative that we are using a direct
* index into the execobj[].
*/
int lut_size;
struct hlist_head *buckets; /** ht for relocation handles */
struct eb_fence *fences;
unsigned long num_fences;
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
struct i915_capture_list *capture_lists[MAX_ENGINE_INSTANCE + 1];
#endif
};
static int eb_parse(struct i915_execbuffer *eb);
static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle);
static void eb_unpin_engine(struct i915_execbuffer *eb);
static void eb_capture_release(struct i915_execbuffer *eb);
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
return intel_engine_requires_cmd_parser(eb->context->engine) ||
(intel_engine_using_cmd_parser(eb->context->engine) &&
eb->args->batch_len);
}
static int eb_create(struct i915_execbuffer *eb)
{
if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
unsigned int size = 1 + ilog2(eb->buffer_count);
/*
* Without a 1:1 association between relocation handles and
* the execobject[] index, we instead create a hashtable.
* We size it dynamically based on available memory, starting
* first with a 1:1 associative hash and scaling back until
* the allocation succeeds.
*
* Later on we use a positive lut_size to indicate we are
* using this hashtable, and a negative value to indicate a
* direct lookup.
*/
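/*
 * Worked example (illustrative): with buffer_count == 100 this starts at
 * size = 1 + ilog2(100) = 7, i.e. a 128-bucket table; each failed
 * allocation halves the table, down to the final 2-bucket attempt which
 * is made without __GFP_NORETRY | __GFP_NOWARN.
 */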
do {
gfp_t flags;
/* While we can still reduce the allocation size, don't
* raise a warning and allow the allocation to fail.
* On the last pass though, we want to try as hard
* as possible to perform the allocation and warn
* if it fails.
*/
flags = GFP_KERNEL;
if (size > 1)
flags |= __GFP_NORETRY | __GFP_NOWARN;
eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
flags);
if (eb->buckets)
break;
} while (--size);
if (unlikely(!size))
return -ENOMEM;
eb->lut_size = size;
} else {
eb->lut_size = -eb->buffer_count;
}
return 0;
}
static bool
eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
const struct i915_vma *vma,
unsigned int flags)
{
if (vma->node.size < entry->pad_to_size)
return true;
if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
return true;
if (flags & EXEC_OBJECT_PINNED &&
vma->node.start != entry->offset)
return true;
if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
vma->node.start < BATCH_OFFSET_BIAS)
return true;
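/*
 * Without EXEC_OBJECT_SUPPORTS_48B_ADDRESS the node, pessimistically
 * rounded up by a page, must fit below the 4GiB boundary.
 */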
if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
(vma->node.start + vma->node.size + 4095) >> 32)
return true;
if (flags & __EXEC_OBJECT_NEEDS_MAP &&
!i915_vma_is_map_and_fenceable(vma))
return true;
return false;
}
static u64 eb_pin_flags(const struct drm_i915_gem_exec_object2 *entry,
unsigned int exec_flags)
{
u64 pin_flags = 0;
if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
pin_flags |= PIN_GLOBAL;
/*
* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
* limit address to the first 4GBs for unflagged objects.
*/
if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
pin_flags |= PIN_ZONE_4G;
if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
pin_flags |= PIN_MAPPABLE;
if (exec_flags & EXEC_OBJECT_PINNED)
pin_flags |= entry->offset | PIN_OFFSET_FIXED;
else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
return pin_flags;
}
static inline int
eb_pin_vma(struct i915_execbuffer *eb,
const struct drm_i915_gem_exec_object2 *entry,
struct eb_vma *ev)
{
struct i915_vma *vma = ev->vma;
u64 pin_flags;
int err;
if (vma->node.size)
pin_flags = vma->node.start;
else
pin_flags = entry->offset & PIN_OFFSET_MASK;
pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED | PIN_VALIDATE;
if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT))
pin_flags |= PIN_GLOBAL;
/* Attempt to reuse the current location if available */
err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, pin_flags);
if (err == -EDEADLK)
return err;
if (unlikely(err)) {
if (entry->flags & EXEC_OBJECT_PINNED)
return err;
/* Failing that pick any _free_ space if suitable */
err = i915_vma_pin_ww(vma, &eb->ww,
entry->pad_to_size,
entry->alignment,
eb_pin_flags(entry, ev->flags) |
PIN_USER | PIN_NOEVICT | PIN_VALIDATE);
if (unlikely(err))
return err;
}
if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
err = i915_vma_pin_fence(vma);
if (unlikely(err))
return err;
if (vma->fence)
ev->flags |= __EXEC_OBJECT_HAS_FENCE;
}
ev->flags |= __EXEC_OBJECT_HAS_PIN;
if (eb_vma_misplaced(entry, vma, ev->flags))
return -EBADSLT;
return 0;
}
static inline void
eb_unreserve_vma(struct eb_vma *ev)
{
if (unlikely(ev->flags & __EXEC_OBJECT_HAS_FENCE))
__i915_vma_unpin_fence(ev->vma);
ev->flags &= ~__EXEC_OBJECT_RESERVED;
}
static int
eb_validate_vma(struct i915_execbuffer *eb,
struct drm_i915_gem_exec_object2 *entry,
struct i915_vma *vma)
{
/* Relocations are disallowed for all platforms after TGL-LP. This
* also covers all platforms with local memory.
*/
if (entry->relocation_count &&
GRAPHICS_VER(eb->i915) >= 12 && !IS_TIGERLAKE(eb->i915))
return -EINVAL;
if (unlikely(entry->flags & eb->invalid_flags))
return -EINVAL;
if (unlikely(entry->alignment &&
!is_power_of_2_u64(entry->alignment)))
return -EINVAL;
/*
* Offset can be used as input (EXEC_OBJECT_PINNED), reject
* any non-page-aligned or non-canonical addresses.
*/
if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
return -EINVAL;
/* pad_to_size was once a reserved field, so sanitize it */
if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
if (unlikely(offset_in_page(entry->pad_to_size)))
return -EINVAL;
} else {
entry->pad_to_size = 0;
}
/*
* From drm_mm perspective address space is continuous,
* so from this point we're always using non-canonical
* form internally.
*/
entry->offset = gen8_noncanonical_addr(entry->offset);
if (!eb->reloc_cache.has_fence) {
entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
} else {
if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
eb->reloc_cache.needs_unfenced) &&
i915_gem_object_is_tiled(vma->obj))
entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
}
return 0;
}
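/*
 * With I915_EXEC_BATCH_FIRST the batch buffers occupy the first
 * eb->num_batches slots of the execobj array; otherwise they are the
 * last eb->num_batches slots.
 */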
static inline bool
is_batch_buffer(struct i915_execbuffer *eb, unsigned int buffer_idx)
{
return eb->args->flags & I915_EXEC_BATCH_FIRST ?
buffer_idx < eb->num_batches :
buffer_idx >= eb->args->buffer_count - eb->num_batches;
}
static int
eb_add_vma(struct i915_execbuffer *eb,
unsigned int *current_batch,
unsigned int i,
struct i915_vma *vma)
{
struct drm_i915_private *i915 = eb->i915;
struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
struct eb_vma *ev = &eb->vma[i];
ev->vma = vma;
ev->exec = entry;
ev->flags = entry->flags;
if (eb->lut_size > 0) {
ev->handle = entry->handle;
hlist_add_head(&ev->node,
&eb->buckets[hash_32(entry->handle,
eb->lut_size)]);
}
if (entry->relocation_count)
list_add_tail(&ev->reloc_link, &eb->relocs);
/*
* SNA is doing fancy tricks with compressing batch buffers, which leads
* to negative relocation deltas. Usually that works out ok since the
* relocate address is still positive, except when the batch is placed
* very low in the GTT. Ensure this doesn't happen.
*
* Note that actual hangs have only been observed on gen7, but for
* paranoia do it everywhere.
*/
if (is_batch_buffer(eb, i)) {
if (entry->relocation_count &&
!(ev->flags & EXEC_OBJECT_PINNED))
ev->flags |= __EXEC_OBJECT_NEEDS_BIAS;
if (eb->reloc_cache.has_fence)
ev->flags |= EXEC_OBJECT_NEEDS_FENCE;
eb->batches[*current_batch] = ev;
if (unlikely(ev->flags & EXEC_OBJECT_WRITE)) {
drm_dbg(&i915->drm,
"Attempting to use self-modifying batch buffer\n");
return -EINVAL;
}
if (range_overflows_t(u64,
eb->batch_start_offset,
eb->args->batch_len,
ev->vma->size)) {
drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
return -EINVAL;
}
if (eb->args->batch_len == 0)
eb->batch_len[*current_batch] = ev->vma->size -
eb->batch_start_offset;
else
eb->batch_len[*current_batch] = eb->args->batch_len;
if (unlikely(eb->batch_len[*current_batch] == 0)) { /* impossible! */
drm_dbg(&i915->drm, "Invalid batch length\n");
return -EINVAL;
}
++*current_batch;
}
return 0;
}
static inline int use_cpu_reloc(const struct reloc_cache *cache,
const struct drm_i915_gem_object *obj)
{
if (!i915_gem_object_has_struct_page(obj))
return false;
if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
return true;
if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
return false;
return (cache->has_llc ||
obj->cache_dirty ||
obj->cache_level != I915_CACHE_NONE);
}
static int eb_reserve_vma(struct i915_execbuffer *eb,
struct eb_vma *ev,
u64 pin_flags)
{
struct drm_i915_gem_exec_object2 *entry = ev->exec;
struct i915_vma *vma = ev->vma;
int err;
if (drm_mm_node_allocated(&vma->node) &&
eb_vma_misplaced(entry, vma, ev->flags)) {
err = i915_vma_unbind(vma);
if (err)
return err;
}
err = i915_vma_pin_ww(vma, &eb->ww,
entry->pad_to_size, entry->alignment,
eb_pin_flags(entry, ev->flags) | pin_flags);
if (err)
return err;
if (entry->offset != vma->node.start) {
entry->offset = vma->node.start | UPDATE;
eb->args->flags |= __EXEC_HAS_RELOC;
}
if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
err = i915_vma_pin_fence(vma);
if (unlikely(err))
return err;
if (vma->fence)
ev->flags |= __EXEC_OBJECT_HAS_FENCE;
}
ev->flags |= __EXEC_OBJECT_HAS_PIN;
GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags));
return 0;
}
static bool eb_unbind(struct i915_execbuffer *eb, bool force)
{
const unsigned int count = eb->buffer_count;
unsigned int i;
struct list_head last;
bool unpinned = false;
/* Resort *all* the objects into priority order */
INIT_LIST_HEAD(&eb->unbound);
INIT_LIST_HEAD(&last);
for (i = 0; i < count; i++) {
struct eb_vma *ev = &eb->vma[i];
unsigned int flags = ev->flags;
if (!force && flags & EXEC_OBJECT_PINNED &&
flags & __EXEC_OBJECT_HAS_PIN)
continue;
unpinned = true;
eb_unreserve_vma(ev);
if (flags & EXEC_OBJECT_PINNED)
/* Pinned objects must have their slot */
list_add(&ev->bind_link, &eb->unbound);
else if (flags & __EXEC_OBJECT_NEEDS_MAP)
/* Mappable objects require the lowest 256MiB (aperture) */
list_add_tail(&ev->bind_link, &eb->unbound);
else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
/* Prioritise 4GiB region for restricted bo */
list_add(&ev->bind_link, &last);
else
list_add_tail(&ev->bind_link, &last);
}
list_splice_tail(&last, &eb->unbound);
return unpinned;
}
static int eb_reserve(struct i915_execbuffer *eb)
{
struct eb_vma *ev;
unsigned int pass;
int err = 0;
bool unpinned;
/*
* Attempt to pin all of the buffers into the GTT.
* This is done in 2 phases:
*
* 1. Unbind all objects that do not match the GTT constraints for
* the execbuffer (fenceable, mappable, alignment etc).
* 2. Bind new objects.
*
* This avoids unnecessary unbinding of later objects in order to make
* room for the earlier objects *unless* we need to defragment.
*
* Defragmenting is skipped if all objects are pinned at a fixed location.
*/
for (pass = 0; pass <= 2; pass++) {
int pin_flags = PIN_USER | PIN_VALIDATE;
if (pass == 0)
pin_flags |= PIN_NONBLOCK;
if (pass >= 1)
unpinned = eb_unbind(eb, pass == 2);
if (pass == 2) {
err = mutex_lock_interruptible(&eb->context->vm->mutex);
if (!err) {
err = i915_gem_evict_vm(eb->context->vm, &eb->ww);
mutex_unlock(&eb->context->vm->mutex);
}
if (err)
return err;
}
list_for_each_entry(ev, &eb->unbound, bind_link) {
err = eb_reserve_vma(eb, ev, pin_flags);
if (err)
break;
}
if (err != -ENOSPC)
break;
}
return err;
}
static int eb_select_context(struct i915_execbuffer *eb)
{
struct i915_gem_context *ctx;
ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
if (unlikely(IS_ERR(ctx)))
return PTR_ERR(ctx);
eb->gem_context = ctx;
if (i915_gem_context_has_full_ppgtt(ctx))
eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
return 0;
}
static int __eb_add_lut(struct i915_execbuffer *eb,
u32 handle, struct i915_vma *vma)
{
struct i915_gem_context *ctx = eb->gem_context;
struct i915_lut_handle *lut;
int err;
lut = i915_lut_handle_alloc();
if (unlikely(!lut))
return -ENOMEM;
i915_vma_get(vma);
if (!atomic_fetch_inc(&vma->open_count))
i915_vma_reopen(vma);
lut->handle = handle;
lut->ctx = ctx;
/* Check that the context hasn't been closed in the meantime */
err = -EINTR;
if (!mutex_lock_interruptible(&ctx->lut_mutex)) {
if (likely(!i915_gem_context_is_closed(ctx)))
err = radix_tree_insert(&ctx->handles_vma, handle, vma);
else
err = -ENOENT;
if (err == 0) { /* And nor has this handle */
struct drm_i915_gem_object *obj = vma->obj;
spin_lock(&obj->lut_lock);
if (idr_find(&eb->file->object_idr, handle) == obj) {
list_add(&lut->obj_link, &obj->lut_list);
} else {
radix_tree_delete(&ctx->handles_vma, handle);
err = -ENOENT;
}
spin_unlock(&obj->lut_lock);
}
mutex_unlock(&ctx->lut_mutex);
}
if (unlikely(err))
goto err;
return 0;
err:
i915_vma_close(vma);
i915_vma_put(vma);
i915_lut_handle_free(lut);
return err;
}
static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
{
struct i915_address_space *vm = eb->context->vm;
do {
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int err;
rcu_read_lock();
vma = radix_tree_lookup(&eb->gem_context->handles_vma, handle);
if (likely(vma && vma->vm == vm))
vma = i915_vma_tryget(vma);
rcu_read_unlock();
if (likely(vma))
return vma;
obj = i915_gem_object_lookup(eb->file, handle);
if (unlikely(!obj))
return ERR_PTR(-ENOENT);
/*
* If the user has opted-in for protected-object tracking, make
* sure the object encryption can be used.
* We only need to do this when the object is first used with
* this context, because the context itself will be banned when
* the protected objects become invalid.
*/
if (i915_gem_context_uses_protected_content(eb->gem_context) &&
i915_gem_object_is_protected(obj)) {
err = intel_pxp_key_check(&vm->gt->pxp, obj, true);
if (err) {
i915_gem_object_put(obj);
return ERR_PTR(err);
}
}
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
return vma;
}
err = __eb_add_lut(eb, handle, vma);
if (likely(!err))
return vma;
i915_gem_object_put(obj);
if (err != -EEXIST)
return ERR_PTR(err);
} while (1);
}
static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
unsigned int i, current_batch = 0;
int err = 0;
INIT_LIST_HEAD(&eb->relocs);
for (i = 0; i < eb->buffer_count; i++) {
struct i915_vma *vma;
vma = eb_lookup_vma(eb, eb->exec[i].handle);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
}
err = eb_validate_vma(eb, &eb->exec[i], vma);
if (unlikely(err)) {
i915_vma_put(vma);
goto err;
}
err = eb_add_vma(eb, &current_batch, i, vma);
if (err)
return err;
if (i915_gem_object_is_userptr(vma->obj)) {
err = i915_gem_object_userptr_submit_init(vma->obj);
if (err) {
if (i + 1 < eb->buffer_count) {
/*
* Execbuffer code expects the last vma entry to be NULL.
* Since we already initialized this entry, set the next
* value to NULL or we mess up cleanup handling.
*/
eb->vma[i + 1].vma = NULL;
}
return err;
}
eb->vma[i].flags |= __EXEC_OBJECT_USERPTR_INIT;
eb->args->flags |= __EXEC_USERPTR_USED;
}
}
return 0;
err:
eb->vma[i].vma = NULL;
return err;
}
static int eb_lock_vmas(struct i915_execbuffer *eb)
{
unsigned int i;
int err;
for (i = 0; i < eb->buffer_count; i++) {
struct eb_vma *ev = &eb->vma[i];
struct i915_vma *vma = ev->vma;
err = i915_gem_object_lock(vma->obj, &eb->ww);
if (err)
return err;
}
return 0;
}
static int eb_validate_vmas(struct i915_execbuffer *eb)
{
unsigned int i;
int err;
INIT_LIST_HEAD(&eb->unbound);
err = eb_lock_vmas(eb);
if (err)
return err;
for (i = 0; i < eb->buffer_count; i++) {
struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
struct eb_vma *ev = &eb->vma[i];
struct i915_vma *vma = ev->vma;
err = eb_pin_vma(eb, entry, ev);
if (err == -EDEADLK)
return err;
if (!err) {
if (entry->offset != vma->node.start) {
entry->offset = vma->node.start | UPDATE;
eb->args->flags |= __EXEC_HAS_RELOC;
}
} else {
eb_unreserve_vma(ev);
list_add_tail(&ev->bind_link, &eb->unbound);
if (drm_mm_node_allocated(&vma->node)) {
err = i915_vma_unbind(vma);
if (err)
return err;
}
}