/*
* Copyright (C) 2019 Assured Information Security, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef XUE_H
#define XUE_H
/* @cond */
#define XUE_PAGE_SIZE 4096ULL
/* Supported xHC PCI configurations */
#define XUE_XHC_CLASSC 0xC0330ULL /* PCI class code: serial bus / USB / xHCI */
#define XUE_XHC_VEN_INTEL 0x8086ULL
#define XUE_XHC_DEV_Z370 0xA2AFULL
#define XUE_XHC_DEV_Z390 0xA36DULL
#define XUE_XHC_DEV_WILDCAT_POINT 0x9CB1ULL
#define XUE_XHC_DEV_SUNRISE_POINT 0x9D2FULL
#define XUE_XHC_DEV_CANNON_POINT 0x9DEDULL
#define XUE_XHC_DEV_COMET_LAKE 0x02EDULL
/* DbC idVendor and idProduct */
#define XUE_DBC_VENDOR 0x1D6B /* Linux Foundation vendor ID */
#define XUE_DBC_PRODUCT 0x0010
#define XUE_DBC_PROTOCOL 0x0000
/* DCCTRL fields (bit positions in the DbC control register,
 * per the xHCI Debug Capability register set) */
#define XUE_CTRL_DCR 0  /* DbC run */
#define XUE_CTRL_HOT 2  /* halt OUT transfer ring */
#define XUE_CTRL_HIT 3  /* halt IN transfer ring */
#define XUE_CTRL_DRC 4  /* DbC run change */
#define XUE_CTRL_DCE 31 /* DbC enable */
/* DCPORTSC fields (bit positions in the DbC port status/control register) */
#define XUE_PSC_PED 1  /* port enabled/disabled */
#define XUE_PSC_CSC 17 /* connect status change */
#define XUE_PSC_PRC 21 /* port reset change */
#define XUE_PSC_PLC 22 /* port link status change */
#define XUE_PSC_CEC 23 /* port config error change */
/* All write-1-to-clear change bits, acknowledged together */
#define XUE_PSC_ACK_MASK \
    ((1UL << XUE_PSC_CSC) | (1UL << XUE_PSC_PRC) | (1UL << XUE_PSC_PLC) | \
     (1UL << XUE_PSC_CEC))
/*
 * Decide whether the given PCI device/vendor dword identifies an xHC
 * that xue knows how to drive.
 *
 * @param dev_ven PCI config dword 0: device ID in bits 31:16, vendor ID
 *        in bits 15:0
 * @return 1 if the controller is supported, 0 otherwise
 */
static inline int known_xhc(uint32_t dev_ven)
{
    static const uint32_t supported[] = {
        (XUE_XHC_DEV_Z370 << 16) | XUE_XHC_VEN_INTEL,
        (XUE_XHC_DEV_Z390 << 16) | XUE_XHC_VEN_INTEL,
        (XUE_XHC_DEV_WILDCAT_POINT << 16) | XUE_XHC_VEN_INTEL,
        (XUE_XHC_DEV_SUNRISE_POINT << 16) | XUE_XHC_VEN_INTEL,
        (XUE_XHC_DEV_CANNON_POINT << 16) | XUE_XHC_VEN_INTEL,
        (XUE_XHC_DEV_COMET_LAKE << 16) | XUE_XHC_VEN_INTEL
    };
    unsigned i;
    for (i = 0; i < sizeof(supported) / sizeof(supported[0]); i++) {
        if (dev_ven == supported[i]) {
            return 1;
        }
    }
    return 0;
}
/* Xue system id: identifies which platform backend is compiled in
 * (stored in struct xue's sysid field) */
enum {
    xue_sysid_linux,   /* Linux kernel module */
    xue_sysid_windows, /* Windows driver (unsupported, all ops fail) */
    xue_sysid_efi,     /* UEFI application/driver */
    xue_sysid_xen,     /* Xen hypervisor */
    xue_sysid_test     /* host userspace unit tests */
};
/* Userspace testing */
#if defined(XUE_TEST)
#include <cstdint>
#include <cstdio>
#define xue_debug(...) printf("xue debug: " __VA_ARGS__)
#define xue_alert(...) printf("xue alert: " __VA_ARGS__)
#define xue_error(...) printf("xue error: " __VA_ARGS__)
#define XUE_SYSID xue_sysid_test
/*
 * No-op system ops for host-side unit tests: no hardware is touched,
 * map/alloc return NULL, and virt_to_dma is the identity mapping.
 */
extern "C" {
static inline int xue_sys_init(void *) { return 1; }
static inline void xue_sys_sfence(void *) {}
static inline void xue_sys_lfence(void *) {}
static inline void xue_sys_pause(void *) {}
static inline void xue_sys_clflush(void *, void *) {}
static inline void *xue_sys_map_xhc(void *, uint64_t, uint64_t) { return NULL; }
static inline void xue_sys_unmap_xhc(void *sys, void *, uint64_t) {}
static inline void *xue_sys_alloc_dma(void *, uint64_t) { return NULL; }
static inline void xue_sys_free_dma(void *sys, void *, uint64_t) {}
static inline void xue_sys_outd(void *sys, uint32_t, uint32_t) {}
static inline uint32_t xue_sys_ind(void *, uint32_t) { return 0; }
/* Identity translation: tests treat virtual addresses as DMA addresses */
static inline uint64_t xue_sys_virt_to_dma(void *, const void *virt)
{
    return (uint64_t)virt;
}
}
#endif
/* Bareflank VMM */
#if defined(VMM)
#include <arch/intel_x64/barrier.h>
#include <arch/intel_x64/pause.h>
#include <arch/x64/cache.h>
#include <arch/x64/portio.h>
#include <cstdio>
#include <debug/serial/serial_ns16550a.h>
#include <memory_manager/arch/x64/cr3.h>
#include <memory_manager/memory_manager.h>
#include <string>
static_assert(XUE_PAGE_SIZE == BAREFLANK_PAGE_SIZE);
/* Format a message and emit it byte-by-byte to the VMM's default serial
 * console; output is truncated to 255 characters plus NUL. */
#define xue_printf(...) \
    do { \
        char buf[256] { 0 }; \
        snprintf(buf, 256, __VA_ARGS__); \
        for (int i = 0; i < 256; i++) { \
            if (buf[i]) { \
                bfvmm::DEFAULT_COM_DRIVER::instance()->write(buf[i]); \
            } else { \
                break; \
            } \
        } \
    } while (0)
#define xue_debug(...) xue_printf("xue debug: " __VA_ARGS__)
#define xue_alert(...) xue_printf("xue alert: " __VA_ARGS__)
#define xue_error(...) xue_printf("xue error: " __VA_ARGS__)
#ifdef __cplusplus
extern "C" {
#endif
static inline int xue_sys_init(void *) { return 1; }
/* Memory barriers and cache ops map onto Bareflank's intrinsics */
static inline void xue_sys_sfence(void *) { ::intel_x64::wmb(); }
static inline void xue_sys_lfence(void *) { ::intel_x64::rmb(); }
static inline void xue_sys_pause(void *) { _pause(); }
static inline void xue_sys_clflush(void *, void *ptr) { _clflush(ptr); }
/* Virtual-to-physical translation via the VMM memory manager */
static inline uint64_t xue_sys_virt_to_dma(void *sys, const void *virt)
{
    (void)sys;
    return g_mm->virtptr_to_physint((void *)virt);
}
/* Allocate 2^order pages from the VMM heap for DMA use */
static inline void *xue_sys_alloc_dma(void *sys, uint64_t order)
{
    (void)sys;
    return g_mm->alloc(XUE_PAGE_SIZE << order);
}
static inline void xue_sys_free_dma(void *sys, void *addr, uint64_t order)
{
    (void)sys;
    (void)order; /* g_mm tracks the allocation size itself */
    g_mm->free(addr);
}
/* Map the xHC MMIO window one 4K page at a time as uncacheable memory */
static inline void *xue_sys_map_xhc(void *sys, uint64_t phys, uint64_t count)
{
    (void)sys;
    void *virt = g_mm->alloc_map(count);
    for (uint64_t i = 0U; i < count; i += XUE_PAGE_SIZE) {
        using attr_t = bfvmm::x64::cr3::mmap::attr_type;
        using mem_t = bfvmm::x64::cr3::mmap::memory_type;
        g_cr3->map_4k((uint64_t)virt + i, phys + i, attr_t::read_write,
                      mem_t::uncacheable);
    }
    return virt;
}
/* Tear down the 4K mappings created by xue_sys_map_xhc */
static inline void xue_sys_unmap_xhc(void *sys, void *virt, uint64_t count)
{
    (void)sys;
    for (uint64_t i = 0U; i < count; i += XUE_PAGE_SIZE) {
        g_cr3->unmap((uint64_t)virt + i);
    }
    g_mm->free_map(virt);
}
static inline void xue_sys_outd(void *sys, uint32_t port, uint32_t data)
{
    (void)sys;
    _outd(port, data);
}
static inline uint32_t xue_sys_ind(void *sys, uint32_t port)
{
    (void)sys;
    return _ind(port);
}
#ifdef __cplusplus
}
#endif
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* Linux driver */
#if defined(MODULE) && defined(__linux__)
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/types.h>
#define xue_debug(...) printk(KERN_DEBUG "xue debug: " __VA_ARGS__)
#define xue_alert(...) printk(KERN_ALERT "xue alert: " __VA_ARGS__)
#define xue_error(...) printk(KERN_ERR "xue error: " __VA_ARGS__)
#define XUE_SYSID xue_sysid_linux
/* No Linux-specific setup required */
static inline int xue_sys_init(void *sys) { return 1; }
/* Barriers and cache flush map onto the kernel's primitives */
static inline void xue_sys_sfence(void *sys) { wmb(); }
static inline void xue_sys_lfence(void *sys) { rmb(); }
static inline void xue_sys_clflush(void *sys, void *ptr) { clflush(ptr); }
static inline void xue_sys_pause(void *sys)
{
    (void)sys;
    __asm volatile("pause" ::: "memory");
}
/* Allocate 2^order physically-contiguous pages; GFP_DMA restricts the
 * allocation to the low DMA zone */
static inline void *xue_sys_alloc_dma(void *sys, uint64_t order)
{
    return (void *)__get_free_pages(GFP_KERNEL | GFP_DMA, order);
}
static inline void xue_sys_free_dma(void *sys, void *addr, uint64_t order)
{
    free_pages((unsigned long)addr, order);
}
/* ioremap produces an uncached mapping of the xHC MMIO window */
static inline void *xue_sys_map_xhc(void *sys, uint64_t phys, uint64_t count)
{
    return ioremap(phys, (long unsigned int)count);
}
static inline void xue_sys_unmap_xhc(void *sys, void *virt, uint64_t count)
{
    (void)count;
    iounmap((volatile void *)virt);
}
static inline void xue_sys_outd(void *sys, uint32_t port, uint32_t data)
{
    outl(data, port);
}
static inline uint32_t xue_sys_ind(void *sys, uint32_t port)
{
    /* NOTE(review): the int32_t cast looks unnecessary (inl takes an
     * unsigned port); harmless for any valid 16-bit port number */
    return inl((int32_t)port);
}
static inline uint64_t xue_sys_virt_to_dma(void *sys, const void *virt)
{
    return virt_to_phys((volatile void *)virt);
}
#endif
/* Windows driver */
#if defined(_WIN32)
#include <basetsd.h>
/* Map the Windows SDK fixed-width types onto the <stdint.h> names */
typedef INT8 int8_t;
typedef INT16 int16_t;
typedef INT32 int32_t;
typedef INT64 int64_t;
typedef UINT8 uint8_t;
typedef UINT16 uint16_t;
typedef UINT32 uint32_t;
typedef UINT64 uint64_t;
typedef UINT_PTR uintptr_t;
typedef INT_PTR intptr_t;
#define XUE_SYSID xue_sysid_windows
#define xue_debug(...) \
    DbgPrintEx(DPFLTR_IHVDRIVER_ID, DPFLTR_INFO_LEVEL, \
               "xue debug: " __VA_ARGS__)
#define xue_alert(...) \
    DbgPrintEx(DPFLTR_IHVDRIVER_ID, DPFLTR_INFO_LEVEL, \
               "xue alert: " __VA_ARGS__)
#define xue_error(...) \
    DbgPrintEx(DPFLTR_IHVDRIVER_ID, DPFLTR_ERROR_LEVEL, \
               "xue error: " __VA_ARGS__)
/*
 * Xue cannot run from Windows drivers: every system op logs an error
 * and returns a failure/neutral value. xue_sys_init returning 0 makes
 * initialization fail before any other op is exercised.
 */
static inline int xue_sys_init(void *sys)
{
    (void)sys;
    xue_error("Xue cannot be used from windows drivers");
    return 0;
}
static inline void xue_sys_sfence(void *sys)
{
    (void)sys;
    xue_error("Xue cannot be used from windows drivers");
}
static inline void xue_sys_lfence(void *sys)
{
    (void)sys;
    xue_error("Xue cannot be used from windows drivers");
}
static inline void xue_sys_pause(void *sys)
{
    (void)sys;
    xue_error("Xue cannot be used from windows drivers");
}
static inline void *xue_sys_alloc_dma(void *sys, uint64_t order)
{
    (void)sys;
    (void)order;
    xue_error("Xue cannot be used from windows drivers");
    return NULL;
}
static inline void xue_sys_free_dma(void *sys, void *addr, uint64_t order)
{
    (void)sys;
    (void)addr;
    (void)order;
    xue_error("Xue cannot be used from windows drivers");
}
static inline void *xue_sys_map_xhc(void *sys, uint64_t phys, uint64_t count)
{
    (void)sys;
    (void)phys;
    (void)count;
    xue_error("Xue cannot be used from windows drivers");
    return NULL;
}
static inline void xue_sys_unmap_xhc(void *sys, void *virt, uint64_t count)
{
    (void)sys;
    (void)virt;
    (void)count;
    xue_error("Xue cannot be used from windows drivers");
}
static inline void xue_sys_outd(void *sys, uint32_t port, uint32_t data)
{
    (void)sys;
    (void)port;
    (void)data;
    xue_error("Xue cannot be used from windows drivers");
}
static inline uint32_t xue_sys_ind(void *sys, uint32_t port)
{
    (void)sys;
    (void)port;
    xue_error("Xue cannot be used from windows drivers");
    return 0U;
}
static inline uint64_t xue_sys_virt_to_dma(void *sys, const void *virt)
{
    (void)sys;
    (void)virt;
    xue_error("Xue cannot be used from windows drivers");
    return 0U;
}
#endif
/* UEFI driver (based on gnuefi) */
#if defined(EFI)
#include <efilib.h>
#define xue_debug(...) Print(L"xue debug: " __VA_ARGS__)
#define xue_alert(...) Print(L"xue alert: " __VA_ARGS__)
#define xue_error(...) Print(L"xue error: " __VA_ARGS__)
#define XUE_SYSID xue_sysid_efi
/* NOTE: see xue_alloc_dma for the number of buffers created by alloc_dma */
#define XUE_DMA_DESC_CAP 7
/* Bookkeeping for one DMA buffer allocated through EFI_PCI_IO */
struct xue_efi_dma {
    UINTN pages;                   /* buffer size in EFI pages */
    EFI_PHYSICAL_ADDRESS dma_addr; /* bus address once mapped (0 if unmapped) */
    VOID *cpu_addr;                /* CPU address returned by AllocateBuffer */
    VOID *mapping;                 /* opaque token for pci->Unmap */
};
/* EFI backend context; passed as the 'sys' pointer to every op */
struct xue_efi {
    EFI_HANDLE img_hand; /* image handle used to open protocols */
    EFI_HANDLE pci_hand; /* handle of the matched xHC */
    EFI_PCI_IO *pci_io;  /* PCI IO protocol bound to the xHC */
    struct xue_efi_dma dma_desc[XUE_DMA_DESC_CAP];
};
/*
 * Locate a supported xHC and bind its PCI IO protocol.
 *
 * Scans every handle carrying EFI_PCI_IO_PROTOCOL, reads PCI config
 * dword 0 (device/vendor ID) from each, and stops at the first
 * controller accepted by known_xhc(), caching the handle and protocol
 * in the xue_efi context.
 *
 * @param sys a struct xue_efi * with img_hand already set
 * @return 1 on success, 0 if no supported xHC was found
 */
static inline int xue_sys_init(void *sys)
{
    EFI_STATUS rc;
    EFI_HANDLE *hand;
    UINTN nr_hand;
    UINTN i;
    struct xue_efi *efi = (struct xue_efi *)sys;
    ZeroMem((VOID *)&efi->dma_desc, sizeof(efi->dma_desc));
    rc = LibLocateHandle(ByProtocol, &PciIoProtocol, NULL, &nr_hand, &hand);
    if (EFI_ERROR(rc)) {
        xue_error("LocateHandle failed: 0x%llx\n", rc);
        return 0;
    }
    for (i = 0; i < nr_hand; i++) {
        UINT32 dev_ven;
        EFI_PCI_IO *pci_io = NULL;
        rc = gBS->OpenProtocol(hand[i], &PciIoProtocol, (VOID **)&pci_io,
                               efi->img_hand, NULL,
                               EFI_OPEN_PROTOCOL_GET_PROTOCOL);
        if (EFI_ERROR(rc)) {
            continue;
        }
        /* Config offset 0: device ID in bits 31:16, vendor ID in 15:0 */
        rc = pci_io->Pci.Read(pci_io, EfiPciIoWidthUint32, 0, 1, &dev_ven);
        if (EFI_ERROR(rc)) {
            gBS->CloseProtocol(hand[i], &PciIoProtocol, efi->img_hand, NULL);
            continue;
        }
        if (known_xhc(dev_ven)) {
            /* NOTE(review): the handle buffer from LibLocateHandle and
             * protocols opened on non-matching handles are never released
             * here — confirm whether this leak matters for the driver */
            efi->pci_hand = hand[i];
            efi->pci_io = pci_io;
            return 1;
        }
    }
    xue_error("Failed to open PCI_IO_PROTOCOL on any known xHC\n");
    return 0;
}
/*
 * Allocate 2^order pages of cache-coherent common-buffer memory through
 * the xHC's PCI IO protocol. Bookkeeping is recorded in the first free
 * slot of efi->dma_desc (at most XUE_DMA_DESC_CAP live allocations).
 * The bus mapping itself is created lazily by xue_sys_virt_to_dma.
 *
 * @param sys a struct xue_efi * initialized by xue_sys_init
 * @param order allocate 2^order pages
 * @return the CPU-visible buffer address, or NULL on failure
 */
static inline void *xue_sys_alloc_dma(void *sys, uint64_t order)
{
    const EFI_ALLOCATE_TYPE atype = AllocateAnyPages;
    const EFI_MEMORY_TYPE mtype = EfiRuntimeServicesData;
    const UINTN attrs = EFI_PCI_ATTRIBUTE_MEMORY_CACHED;
    const UINTN pages = 1UL << order;
    struct xue_efi_dma *dma = NULL;
    struct xue_efi *efi = (struct xue_efi *)sys;
    EFI_PCI_IO *pci = efi->pci_io;
    EFI_STATUS rc = 0;
    VOID *addr = NULL;
    UINTN i = 0;
    /* Find the first unused descriptor slot (cpu_addr == NULL) */
    for (; i < XUE_DMA_DESC_CAP; i++) {
        dma = &efi->dma_desc[i];
        if (!dma->cpu_addr) {
            break;
        }
        dma = NULL;
    }
    if (!dma) {
        xue_error("Out of DMA descriptors\n");
        return NULL;
    }
    rc = pci->AllocateBuffer(pci, atype, mtype, pages, &addr, attrs);
    if (EFI_ERROR(rc)) {
        xue_error("AllocateBuffer failed: 0x%llx\n", rc);
        return NULL;
    }
    /* dma_addr/mapping stay zero until xue_sys_virt_to_dma maps it */
    dma->pages = pages;
    dma->cpu_addr = addr;
    return addr;
}
/*
 * Release a buffer previously returned by xue_sys_alloc_dma: unmap the
 * bus mapping (if xue_sys_virt_to_dma created one), free the buffer,
 * and clear the descriptor slot. Unknown addresses are ignored.
 *
 * @param sys a struct xue_efi *
 * @param addr the CPU address returned by xue_sys_alloc_dma
 * @param order unused; the descriptor records the page count
 */
static inline void xue_sys_free_dma(void *sys, void *addr, uint64_t order)
{
    (void)order;
    struct xue_efi_dma *dma = NULL;
    struct xue_efi *efi = (struct xue_efi *)sys;
    EFI_PCI_IO *pci = efi->pci_io;
    EFI_STATUS rc = 0;
    UINTN i = 0;
    /* Find the descriptor that owns this CPU address */
    for (; i < XUE_DMA_DESC_CAP; i++) {
        dma = &efi->dma_desc[i];
        if (dma->cpu_addr == addr) {
            break;
        }
        dma = NULL;
    }
    if (!dma) {
        return;
    }
    if (dma->mapping) {
        rc = pci->Unmap(pci, dma->mapping);
        if (EFI_ERROR(rc)) {
            xue_error("pci->Unmap failed: 0x%llx\n", rc);
        }
    }
    rc = pci->FreeBuffer(pci, dma->pages, addr);
    if (EFI_ERROR(rc)) {
        xue_error("FreeBuffer failed: 0x%llx\n", rc);
    }
    /* Clearing the slot makes it reusable by xue_sys_alloc_dma */
    ZeroMem((VOID *)dma, sizeof(*dma));
}
/*
 * Translate a CPU address inside one of our DMA buffers to a bus
 * address. The whole buffer is mapped (BusMasterCommonBuffer) on first
 * use and the mapping is cached in its descriptor; later calls return
 * the cached base plus the page offset.
 *
 * Note: virt is matched only against page-aligned addresses within each
 * buffer, so it must itself be page-aligned to be found.
 *
 * @param sys a struct xue_efi *
 * @param virt a (page-aligned) address within a buffer from alloc_dma
 * @return the bus-relative DMA address, or 0 on failure
 */
static inline uint64_t xue_sys_virt_to_dma(void *sys, const void *virt)
{
    UINTN i = 0;
    UINTN offset = 0;
    UINTN needed = 0;
    UINTN mapped = 0;
    struct xue_efi *efi = (struct xue_efi *)sys;
    struct xue_efi_dma *dma = NULL;
    EFI_PHYSICAL_ADDRESS dma_addr = 0;
    EFI_PCI_IO *pci = efi->pci_io;
    EFI_STATUS rc = 0;
    VOID *mapping = NULL;
    /* Search every descriptor's pages for the given CPU address */
    for (; i < XUE_DMA_DESC_CAP; i++) {
        dma = &efi->dma_desc[i];
        UINTN p = 0;
        for (; p < dma->pages; p++) {
            UINTN addr = (UINTN)dma->cpu_addr + (p * XUE_PAGE_SIZE);
            if ((UINTN)virt == addr) {
                offset = addr - (UINTN)dma->cpu_addr;
                goto found;
            }
        }
        dma = NULL;
    }
    if (!dma) {
        xue_error("CPU addr 0x%llx not found in DMA descriptor\n", virt);
        return 0;
    }
found:
    /* Already mapped: reuse the cached bus address */
    if (dma->dma_addr && dma->mapping) {
        return dma->dma_addr + offset;
    }
    /* First use: map the entire buffer in one operation */
    needed = dma->pages << EFI_PAGE_SHIFT;
    mapped = needed;
    rc = pci->Map(pci, EfiPciIoOperationBusMasterCommonBuffer, (void *)virt,
                  &mapped, &dma_addr, &mapping);
    if (EFI_ERROR(rc) || mapped != needed) {
        xue_error("pci->Map failed: rc: 0x%llx, mapped: %llu, needed: %llu\n",
                  rc, mapped, needed);
        return 0;
    }
    dma->dma_addr = dma_addr;
    dma->mapping = mapping;
    if ((const void *)dma_addr != virt) {
        xue_alert("Non-identity DMA mapping: dma: 0x%llx cpu: 0x%llx\n",
                  dma_addr, virt);
    }
    return dma_addr;
}
/*
 * Write a 32-bit value to the given IO port.
 *
 * The previous implementation moved the operands into %rdx/%rax by hand
 * without listing rax/rdx in the clobber list, which violates the GCC
 * inline-asm contract (the compiler may assume those registers survive
 * the asm). Using "a"/"Nd" constraints lets the compiler place the
 * operands itself and removes the undeclared clobbers.
 *
 * @param sys unused
 * @param port the IO port to write to
 * @param val the 32-bit value to write
 */
static inline void xue_sys_outd(void *sys, uint32_t port, uint32_t val)
{
    (void)sys;
    __asm volatile("outl %0, %1" : : "a"(val), "Nd"((uint16_t)port));
}
/*
 * Read a 32-bit value from the given IO port.
 *
 * The previous implementation clobbered %rdx (movq into it) without
 * declaring the clobber, and zeroed %rax by hand; both are expressed
 * through constraints here ("=a" output, "Nd" port), which is correct
 * under the GCC inline-asm contract and generates the same inl.
 *
 * @param sys unused
 * @param port the IO port to read from
 * @return the 32-bit value read
 */
static inline uint32_t xue_sys_ind(void *sys, uint32_t port)
{
    uint32_t ret;
    (void)sys;
    __asm volatile("inl %1, %0" : "=a"(ret) : "Nd"((uint16_t)port));
    return ret;
}
/*
 * "Map" the xHC MMIO region. EFI runs with physical memory identity
 * mapped, so the region is already addressable at its physical address;
 * simply hand the physical address back as a pointer.
 *
 * @param sys unused
 * @param phys the physical base of the MMIO region
 * @param count unused (number of bytes)
 * @return the identity-mapped virtual address
 */
static inline void *xue_sys_map_xhc(void *sys, uint64_t phys, uint64_t count)
{
    (void)sys;
    (void)count;
    return (void *)(uintptr_t)phys;
}
/* Nothing to unmap: xue_sys_map_xhc created no mapping state */
static inline void xue_sys_unmap_xhc(void *sys, void *virt, uint64_t count)
{
    (void)sys;
    (void)virt;
    (void)count;
}
/* Barrier/CPU ops implemented directly with the raw x86 instructions,
 * since no OS primitives are available under EFI */
static inline void xue_sys_sfence(void *sys)
{
    (void)sys;
    __asm volatile("sfence" ::: "memory");
}
static inline void xue_sys_lfence(void *sys)
{
    (void)sys;
    __asm volatile("lfence" ::: "memory");
}
static inline void xue_sys_pause(void *sys)
{
    (void)sys;
    __asm volatile("pause" ::: "memory");
}
static inline void xue_sys_clflush(void *sys, void *ptr)
{
    (void)sys;
    __asm volatile("clflush %0" : "+m"(*(volatile char *)ptr));
}
#endif
#if defined(__XEN__) && !defined(VMM)
#include <asm/fixmap.h>
#include <asm/io.h>
#include <xen/mm.h>
#include <xen/types.h>
#define xue_debug(...) printk("xue debug: " __VA_ARGS__)
#define xue_alert(...) printk("xue alert: " __VA_ARGS__)
#define xue_error(...) printk("xue error: " __VA_ARGS__)
#define XUE_SYSID xue_sysid_xen
static inline int xue_sys_init(void *sys) { return 1; }
static inline void xue_sys_sfence(void *sys) { wmb(); }
static inline void xue_sys_lfence(void *sys) { rmb(); }
/* Fixmap slots and DMA buffers persist for the life of the hypervisor,
 * so unmap/free are deliberate no-ops */
static inline void xue_sys_unmap_xhc(void *sys, void *virt, uint64_t count) {}
static inline void xue_sys_free_dma(void *sys, void *addr, uint64_t order) {}
static inline void xue_sys_pause(void *sys)
{
    (void)sys;
    __asm volatile("pause" ::: "memory");
}
static inline void xue_sys_clflush(void *sys, void *ptr)
{
    (void)sys;
    __asm volatile("clflush %0" : "+m"(*(volatile char *)ptr));
}
/* NOTE(review): always fails — presumably Xen provides the DMA buffers
 * by some other means (static allocation); confirm against the caller */
static inline void *xue_sys_alloc_dma(void *sys, uint64_t order)
{
    return NULL;
}
static inline uint32_t xue_sys_ind(void *sys, uint32_t port)
{
    return inl(port);
}
static inline void xue_sys_outd(void *sys, uint32_t port, uint32_t data)
{
    outl(data, port);
}
static inline uint64_t xue_sys_virt_to_dma(void *sys, const void *virt)
{
    return virt_to_maddr(virt);
}
/*
 * Map the xHC MMIO region into the dedicated XHCI fixmap slots
 * (uncacheable). Exactly MAX_XHCI_PAGES pages must be requested.
 */
static void *xue_sys_map_xhc(void *sys, uint64_t phys, uint64_t size)
{
    size_t i;
    if (size != MAX_XHCI_PAGES * XUE_PAGE_SIZE) {
        return NULL;
    }
    /* NOTE(review): i is unsigned; this assumes FIX_XHCI_BEGIN > 0,
     * otherwise the final i-- would wrap — confirm the fixmap layout */
    for (i = FIX_XHCI_END; i >= FIX_XHCI_BEGIN; i--) {
        set_fixmap_nocache(i, phys);
        phys += XUE_PAGE_SIZE;
    }
    /*
     * The fixmap grows downward, so the lowest virt is
     * at the highest index
     */
    return fix_to_virt(FIX_XHCI_END);
}
#endif
/******************************************************************************
* TRB ring (summarized from the manual):
*
* TRB rings are circular queues of TRBs shared between the xHC and the driver.
* Each ring has one producer and one consumer. The DbC has one event
* ring and two transfer rings; one IN and one OUT.
*
* The DbC hardware is the producer on the event ring, and
* xue is the consumer. This means that event TRBs are read-only
* from xue's perspective.
*
* OTOH, xue is the producer of transfer TRBs on the two transfer
* rings, so xue enqueues transfers, and the hardware dequeues
* them. The dequeue pointer of a transfer ring is read by
* xue by examining the latest transfer event TRB on the event ring. The
* transfer event TRB contains the address of the transfer TRB that generated
* the event.
*
* To make each transfer ring circular, the last TRB must be a link TRB, which
* points to the beginning of the next queue. Note that this implementation
* does not support multiple segments, so each link TRB points back to the
* beginning of its own segment.
******************************************************************************/
/* TRB types (values from the xHCI TRB type field) */
enum {
    xue_trb_norm = 1,  /* normal transfer TRB */
    xue_trb_link = 6,  /* link TRB (makes a ring circular) */
    xue_trb_tfre = 32, /* transfer event TRB */
    xue_trb_psce = 34  /* port status change event TRB */
};
/* TRB completion codes */
enum { xue_trb_cc_success = 1, xue_trb_cc_trb_err = 5 };
/* DbC endpoint types */
enum { xue_ep_bulk_out = 2, xue_ep_bulk_in = 6 };
/* DMA/MMIO structures (packed: these match hardware layouts exactly) */
#pragma pack(push, 1)
/* Generic 16-byte xHCI transfer request block */
struct xue_trb {
    uint64_t params; /* type-specific parameters (e.g. buffer pointer) */
    uint32_t status; /* transfer length / completion code fields */
    uint32_t ctrl;   /* cycle bit, TRB type, and type-specific flags */
};
/* Event ring segment table entry */
struct xue_erst_segment {
    uint64_t base;    /* DMA address of the ring segment */
    uint16_t size;    /* number of TRBs in the segment */
    uint8_t rsvdz[6]; /* reserved, must be zero */
};
#define XUE_CTX_SIZE 16
#define XUE_CTX_BYTES (XUE_CTX_SIZE * 4)
/* DbC context: info context plus the OUT and IN endpoint contexts */
struct xue_dbc_ctx {
    uint32_t info[XUE_CTX_SIZE];
    uint32_t ep_out[XUE_CTX_SIZE];
    uint32_t ep_in[XUE_CTX_SIZE];
};
/* MMIO register layout of the xHCI Debug Capability */
struct xue_dbc_reg {
    uint32_t id;     /* capability ID register */
    uint32_t db;     /* doorbell */
    uint32_t erstsz; /* event ring segment table size */
    uint32_t rsvdz;
    uint64_t erstba; /* event ring segment table base address */
    uint64_t erdp;   /* event ring dequeue pointer */
    uint32_t ctrl;   /* control (see XUE_CTRL_* bit positions) */
    uint32_t st;     /* status */
    uint32_t portsc; /* port status/control (see XUE_PSC_* bits) */
    uint32_t rsvdp;
    uint64_t cp;     /* debug capability context pointer */
    uint32_t ddi1;   /* device descriptor info 1 */
    uint32_t ddi2;   /* device descriptor info 2 */
};
#pragma pack(pop)
/* Maximum bytes one normal TRB can transfer (64 KB) */
#define XUE_TRB_MAX_TFR (XUE_PAGE_SIZE << 4)
#define XUE_TRB_PER_PAGE (XUE_PAGE_SIZE / sizeof(struct xue_trb))
/* Defines the size in bytes of TRB rings as 2^XUE_TRB_RING_ORDER * 4096 */
#ifndef XUE_TRB_RING_ORDER
#define XUE_TRB_RING_ORDER 4
#endif
#define XUE_TRB_RING_CAP (XUE_TRB_PER_PAGE * (1ULL << XUE_TRB_RING_ORDER))
#define XUE_TRB_RING_BYTES (XUE_TRB_RING_CAP * sizeof(struct xue_trb))
#define XUE_TRB_RING_MASK (XUE_TRB_RING_BYTES - 1U)
/* One single-segment TRB ring (event or transfer) */
struct xue_trb_ring {
    struct xue_trb *trb; /* Array of TRBs */
    uint32_t enq; /* The offset of the enqueue ptr */
    uint32_t deq; /* The offset of the dequeue ptr */
    uint8_t cyc; /* Cycle state toggled on each wrap-around */
    uint8_t db; /* Doorbell target */
};
/* Doorbell targets for the two transfer rings */
#define XUE_DB_OUT 0x0
#define XUE_DB_IN 0x1
#define XUE_DB_INVAL 0xFF
/* Defines the size in bytes of work rings as 2^XUE_WORK_RING_ORDER * 4096 */
#ifndef XUE_WORK_RING_ORDER
#define XUE_WORK_RING_ORDER 3
#endif
#define XUE_WORK_RING_CAP (XUE_PAGE_SIZE * (1ULL << XUE_WORK_RING_ORDER))
#define XUE_WORK_RING_BYTES XUE_WORK_RING_CAP
/* A work ring must fit within a single TRB transfer (XUE_TRB_MAX_TFR) */
#if XUE_WORK_RING_CAP > XUE_TRB_MAX_TFR
#error "XUE_WORK_RING_ORDER must be at most 4"
#endif
/* Byte ring that stages outgoing data before it is posted as TRBs */
struct xue_work_ring {
    uint8_t *buf; /* ring storage (DMA-able) */
    uint32_t enq; /* enqueue offset */
    uint32_t deq; /* dequeue offset */
    uint64_t dma; /* DMA address of buf */
};
/* @endcond */
/**
 * Set of system-specific operations required by xue to initialize and
 * control the DbC. An instance of this structure must be passed to
 * xue_open. Any field that is NULL will default to the xue_sys_*
 * implementation defined for the target platform. <em>Any non-NULL field will
 * simply be called</em>.
 */
struct xue_ops {
    /**
     * Perform system-specific init operations
     *
     * @param sys a pointer to a system-specific data structure
     * @return != 0 iff successful
     */
    int (*init)(void *sys);
    /**
     * Allocate pages for read/write DMA
     *
     * @param sys a pointer to a system-specific data structure
     * @param order allocate 2^order pages
     * @return a cpu-relative virtual address for accessing the DMA buffer
     */
    void *(*alloc_dma)(void *sys, uint64_t order);
    /**
     * Free pages previously allocated with alloc_dma
     *
     * @param sys a pointer to a system-specific data structure
     * @param addr the cpu-relative address of the DMA range to free
     * @param order the order of the set of pages to free
     */
    void (*free_dma)(void *sys, void *addr, uint64_t order);
    /**
     * Map in the xHC MMIO region as uncacheable memory
     *
     * @param sys a pointer to a system-specific data structure
     * @param phys the value from the xHC's BAR
     * @param size the number of bytes to map in
     * @return the mapped virtual address
     */
    void *(*map_xhc)(void *sys, uint64_t phys, uint64_t size);
    /**
     * Unmap xHC MMIO region
     *
     * @param sys a pointer to a system-specific data structure
     * @param virt the MMIO address to unmap
     * @param size the number of bytes to unmap
     */
    void (*unmap_xhc)(void *sys, void *virt, uint64_t size);
    /**
     * Write 32 bits to IO port
     *
     * @param sys a pointer to a system-specific data structure
     * @param port the port to write to
     * @param data the data to write
     */
    void (*outd)(void *sys, uint32_t port, uint32_t data);
    /**
     * Read 32 bits from IO port
     *
     * @param sys a pointer to a system-specific data structure
     * @param port the port to read from
     * @return the data read from the port
     */
    uint32_t (*ind)(void *sys, uint32_t port);
    /**
     * Translate a virtual address to a DMA address
     *
     * @param sys a pointer to a system-specific data structure
     * @param virt the address returned from a previous alloc_dma call
     * @return the resulting bus-relative DMA address
     */
    uint64_t (*virt_to_dma)(void *sys, const void *virt);
    /**
     * Perform a write memory barrier
     * @param sys a pointer to a system-specific data structure
     */
    void (*sfence)(void *sys);
    /**
     * Perform a read memory barrier
     * @param sys a pointer to a system-specific data structure
     */
    void (*lfence)(void *sys);
    /**
     * Pause CPU execution
     * @param sys a pointer to a system-specific data structure
     */
    void (*pause)(void *sys);
    /**
     * Flush the cache line at the given address
     * @param sys a pointer to a system-specific data structure
     * @param ptr the address to flush
     */
    void (*clflush)(void *sys, void *ptr);
};
/* @cond */
/* Top-level driver state for one DbC instance */
struct xue {
    struct xue_ops *ops; /* system ops (see struct xue_ops) */
    void *sys;           /* opaque context passed to every op */
    struct xue_dbc_reg *dbc_reg;       /* mapped DbC MMIO registers */
    struct xue_dbc_ctx *dbc_ctx;       /* DbC context (DMA memory) */
    struct xue_erst_segment *dbc_erst; /* event ring segment table (DMA) */
    struct xue_trb_ring dbc_ering; /* event ring (hardware is producer) */
    struct xue_trb_ring dbc_oring; /* bulk OUT transfer ring */
    struct xue_trb_ring dbc_iring; /* bulk IN transfer ring */
    struct xue_work_ring dbc_owork; /* staging ring for outgoing bytes */
    char *dbc_str; /* DMA buffer — presumably USB string descriptors;
                    * confirm at the use site */
    uint32_t xhc_cf8;        /* PCI config address (0xCF8 form) of the xHC */
    uint64_t xhc_mmio_phys;  /* physical MMIO base from the xHC BAR */
    uint64_t xhc_mmio_size;  /* size of the MMIO region in bytes */
    uint64_t xhc_dbc_offset; /* offset of the DbC registers within MMIO */
    void *xhc_mmio;          /* CPU mapping of the MMIO region */
    int dma_allocated; /* nonzero once the DMA buffers exist */
    int open;          /* nonzero while the DbC is initialized/running */
    int sysid;         /* one of the xue_sysid_* values */
};