-
Notifications
You must be signed in to change notification settings - Fork 1
/
pci.c
716 lines (540 loc) · 19.6 KB
/
pci.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
/*
* \file pci.c
*
* Copyright (C) 2022 Deniz Eren <deniz.eren@outlook.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <stdio.h>
#include <string.h>
#include <sys/neutrino.h>
#include <sys/mman.h>
#include "config.h"
#include "pci.h"
#include "pci-capability.h"
#include <linux/io.h>
/* Highest I/O port address seen across all device BARs; consumed by
 * linux/io.h to decide between port-I/O and memory-mapped access. */
size_t io_port_addr_threshold = 0x0; // used in linux/io.h

// IO and MEM address space tracking: running min/max over every BAR
// processed in pci_enable_device().
size_t pci_io_addr_space_begin = 0x0;
size_t pci_io_addr_space_end = 0x0;
size_t pci_mem_addr_space_begin = 0x0;
size_t pci_mem_addr_space_end = 0x0;

/* Linked list of detected (vid, did, driver) selections and the entry
 * currently being probed. */
driver_selection_t* driver_selection_root = NULL;
driver_selection_t* probe_driver_selection = NULL;
int next_device_id = 0;

/* Helper structures: BAR registry and mapped-I/O block registry
 * (ioblock list guarded by ioblock_mutex). */
bar_t* bar_list_root = NULL;
ioblock_t* ioblock_root = NULL;
pthread_mutex_t ioblock_mutex = PTHREAD_MUTEX_INITIALIZER;
/*
* TODO: use these error codes PCIBIOS_* from linux/pci.h for return values
*/
/*
 * Scan a driver's id_table (terminated by an entry with vendor == 0)
 * for a matching vendor/device pair.
 *
 * Returns 1 when the driver supports the device, 0 otherwise (including
 * when the driver has no id_table at all).
 */
static int check_driver_support (const struct pci_driver* driver,
        pci_vid_t vid, pci_did_t did)
{
    const struct pci_device_id* entry = driver->id_table;

    if (entry == NULL) {
        return 0;
    }
    for ( ; entry->vendor != 0; ++entry) {
        if (entry->vendor == vid && entry->device == did) {
            return 1;
        }
    }
    return 0;
}
/**
 * process_driver_selection - enumerate every PCI device in the system and
 * record which built-in CAN driver (if any) supports its vendor/device pair.
 *
 * Devices whose whole device entry (cap == -1) appears in the
 * disable-device configuration are skipped when option 'optd' is set.
 *
 * \return 0 always.
 */
int process_driver_selection() {
    uint_t idx = 0;
    pci_bdf_t bdf = 0;
    while ((bdf = pci_device_find(
            idx, PCI_VID_ANY, PCI_DID_ANY, PCI_CCODE_ANY) ) != PCI_BDF_NONE)
    {
        pci_vid_t vid;
        pci_did_t did;
        pci_err_t r = pci_device_read_vid(bdf, &vid);
        if (r != PCI_ERR_OK) {
            /* Bug fix: advance to the next device instance before
             * continuing. The original bare 'continue' skipped the ++idx
             * at the bottom of the loop, so pci_device_find() kept
             * returning the same device and the loop never terminated
             * when the VID read failed persistently. */
            ++idx;
            continue;
        }
        r = pci_device_read_did(bdf, &did);
        if (r == PCI_ERR_OK)
        {
            /* does this driver handle this device ? */
            struct pci_driver* detected_driver_temp = NULL;
            if (check_driver_support(&adv_pci_driver, vid, did)) {
                detected_driver_temp = &adv_pci_driver;
            }
            else if (check_driver_support(&kvaser_pci_driver, vid, did)) {
                detected_driver_temp = &kvaser_pci_driver;
            }
            else if (check_driver_support(&ems_pci_driver, vid, did)) {
                detected_driver_temp = &ems_pci_driver;
            }
            else if (check_driver_support(&peak_pci_driver, vid, did)) {
                detected_driver_temp = &peak_pci_driver;
            }
            else if (check_driver_support(&plx_pci_driver, vid, did)) {
                detected_driver_temp = &plx_pci_driver;
            }
            else if (check_driver_support(&f81601_pci_driver, vid, did)) {
                detected_driver_temp = &f81601_pci_driver;
            }
            if (detected_driver_temp) {
                /* Check whether this exact device has been disabled in the
                 * configuration; cap == -1 marks a whole-device disable. */
                bool device_disabled = false;
                int i;
                for (i = 0; i < num_disable_device_configs; ++i) {
                    if (disable_device_config[i].vid == vid &&
                        disable_device_config[i].did == did &&
                        disable_device_config[i].cap == -1)
                    {
                        device_disabled = true;
                        break;
                    }
                }
                if (!optd || !device_disabled) {
                    store_driver_selection(vid, did, detected_driver_temp);
                }
            }
        }
        /* get next device instance */
        ++idx;
    }
    return 0;
}
/**
 * pci_enable_device - Linux-compat device enable on the QNX PCI server.
 *
 * Attaches exclusively to the single device matching dev->vendor /
 * dev->device, reads subsystem IDs and chassis/slot information,
 * initialises MSI-X capability state, records all base address registers
 * (updating the global I/O vs MEM address-space tracking consumed by
 * linux/io.h), and reads the device's IRQ assignment(s).
 *
 * \param dev  device descriptor; caller must have set vendor and device.
 * \return PCI_ERR_OK on success, a pci_err_t error code otherwise.
 *
 * NOTE(review): on the error-return paths below the attached handle and
 * the malloc'd ba/addr arrays are not released here — presumably
 * pci_disable_device() is expected to clean up; confirm callers do so.
 */
pci_err_t pci_enable_device (struct pci_dev* dev) {
    log_trace("pci_enable_device: %04x:%04x\n",
        dev->vendor, dev->device);
    dev->is_managed = false;
    uint_t idx = 0;
    pci_bdf_t bdf;
    while ((bdf = pci_device_find(
        idx, dev->vendor, dev->device, PCI_CCODE_ANY) ) != PCI_BDF_NONE)
    {
        /* Only one physical device per VID/DID combination is supported. */
        if (idx > 0) {
            log_err("only single device per vendor and device id combination "
                "supported\n");
            return PCI_ERR_ENOTSUP; // Not supported
        }
        pci_err_t r;
        /* Exclusive attach: a second driver instance fails here. */
        dev->hdl = pci_device_attach(bdf, pci_attachFlags_EXCLUSIVE_OWNER, &r);
        if (dev->hdl == NULL) {
            log_err("pci_device_attach error: %s\n", pci_strerror(r));
            log_warn("only a single instance of the driver is allowed\n");
            return r;
        }
        /*
         * Read some basic info
         */
        pci_ssvid_t ssvid;
        pci_ssid_t ssid;
        pci_cs_t cs; /* chassis and slot */
        if ((r = pci_device_read_ssvid(bdf, &ssvid)) != PCI_ERR_OK) {
            log_err("pci_device_read_ssvid error: %s\n", pci_strerror(r));
            return r;
        }
        log_info("read ssvid: %04x\n", ssvid);
        dev->subsystem_vendor = ssvid;
        if ((r = pci_device_read_ssid(bdf, &ssid)) != PCI_ERR_OK) {
            log_err("pci_device_read_ssid error: %s\n", pci_strerror(r));
            return r;
        }
        log_info("read ssid: %04x\n", ssid);
        dev->subsystem_device = ssid;
        cs = pci_device_chassis_slot(bdf);
        /* Compose a Linux-style devfn from the QNX slot and function. */
        dev->devfn = PCI_DEVFN(PCI_SLOT(cs), PCI_FUNC(bdf));
        log_info("read cs: %x, slot: %x, func: %x, devfn: %x\n",
            cs, PCI_SLOT(cs), PCI_FUNC(bdf), dev->devfn);
        /*
         * Load capabilities
         */
        msix_init(dev);
        /*
         * Process bar info
         */
#define MAX_NUM_BA 32 /* Making this larger than what should be needed */
        pci_ba_t ba[MAX_NUM_BA]; /* the maximum number of entries that can
                                    be returned */
        int_t nba = NELEMENTS(ba);
        /* read the address space information */
        r = pci_device_read_ba(dev->hdl, &nba, ba, pci_reqType_e_UNSPECIFIED);
        if ((r == PCI_ERR_OK) && (nba > 0))
        {
            dev->nba = nba;
            /* NOTE(review): these allocations are not checked for NULL and
             * are not freed on the error returns below — confirm cleanup
             * responsibility. */
            dev->ba = (pci_ba_t*)malloc(nba*sizeof(pci_ba_t));
            dev->addr = (void __iomem**)malloc(nba*sizeof(void*));
            for (int_t i = 0; i < nba; i++) {
                dev->ba[i] = ba[i];
                store_bar(ba[i]);
                char type[16];
                size_t new_begin = dev->ba[i].addr;
                size_t new_end = dev->ba[i].addr + dev->ba[i].size;
                if (dev->ba[i].type == pci_asType_e_IO) {
                    snprintf(type, 16, "I/O");
                    // Update IO address space tracking: grow the [begin,end)
                    // envelope to cover this BAR.
                    if (pci_io_addr_space_begin == 0x0 ||
                        pci_io_addr_space_end == 0x0)
                    {
                        pci_io_addr_space_begin = new_begin;
                        pci_io_addr_space_end = new_end;
                    }
                    else {
                        if (pci_io_addr_space_begin > new_begin) {
                            pci_io_addr_space_begin = new_begin;
                        }
                        if (pci_io_addr_space_end < new_end) {
                            pci_io_addr_space_end = new_end;
                        }
                    }
                }
                else if (dev->ba[i].type == pci_asType_e_MEM) {
                    snprintf(type, 16, "MEM");
                    // Update MEM address space tracking (same envelope
                    // logic as the I/O case above).
                    if (pci_mem_addr_space_begin == 0x0 ||
                        pci_mem_addr_space_end == 0x0)
                    {
                        pci_mem_addr_space_begin = new_begin;
                        pci_mem_addr_space_end = new_end;
                    }
                    else {
                        if (pci_mem_addr_space_begin > new_begin) {
                            pci_mem_addr_space_begin = new_begin;
                        }
                        if (pci_mem_addr_space_end < new_end) {
                            pci_mem_addr_space_end = new_end;
                        }
                    }
                }
                else {
                    log_err("pci_enable_device error; unknown PCI region "
                        "type: %d\n", dev->ba[i].type);
                    return PCI_ERR_ENOTSUP; // Not supported
                }
                log_info("read ba[%d] %s { addr: %x, size: %x }\n",
                    i, type, (u32)dev->ba[i].addr, (u32)dev->ba[i].size);
                // Check IO vs MEM space clash and update io_port_addr_threshold;
                // linux/io.h relies on the two ranges not overlapping to tell
                // port addresses from memory addresses.
                if (pci_io_addr_space_end > pci_mem_addr_space_begin &&
                    pci_io_addr_space_end != 0x0 &&
                    pci_mem_addr_space_begin != 0x0)
                {
                    log_err("pci_enable_device error; IO/MEM space clash "
                        "detected; [%p:%p] vs [%p:%p]\n",
                        (void*)pci_io_addr_space_begin,
                        (void*)pci_io_addr_space_end,
                        (void*)pci_mem_addr_space_begin,
                        (void*)pci_mem_addr_space_end);
                    return PCI_ERR_ENOTSUP; // Not supported
                }
                // Used in linux/io.h
                io_port_addr_threshold = pci_io_addr_space_end;
                log_trace("io threshold: %p; I/O[%p:%p], MEM[%p:%p]\n",
                    (void*)io_port_addr_threshold,
                    (void*)pci_io_addr_space_begin,
                    (void*)pci_io_addr_space_end,
                    (void*)pci_mem_addr_space_begin,
                    (void*)pci_mem_addr_space_end);
            }
        }
#undef MAX_NUM_BA
        /*
         * Process IRQ info
         */
        pci_irq_t irq[32];
        int_t nirq = NELEMENTS(irq);
        /* read the irq information */
        r = pci_device_read_irq(dev->hdl, &nirq, irq);
        if ((r == PCI_ERR_OK) && (nirq > 0))
        {
            /* First vector becomes the Linux-style dev->irq. */
            dev->irq = irq[0];
            for (int_t i=0; i<nirq; i++) {
                log_info("read irq[%d]: %d\n", i, irq[i]);
            }
            irq_group_add( irq, nirq, dev->hdl, dev->msi_cap,
                dev->is_msi, dev->is_msix );
        }
        /* NOTE(review): if the IRQ read above fails, dev->irq is whatever
         * it was before this call — presumably zeroed by the caller;
         * confirm dev is zero-initialized before pci_enable_device(). */
        if (dev->irq == 0) {
            log_err("failed to read any IRQs\n");
            return PCI_ERR_ENOTSUP; // Not supported
        }
        /* get next device instance */
        ++idx;
    }
    return PCI_ERR_OK;
}
/**
 * pci_disable_device - Linux-compat disable: release the exclusive
 * attachment taken in pci_enable_device() and tear down MSI/MSI-X state.
 *
 * \param dev  device to disable; tolerates an unattached/not-enabled
 *             device (logs an error and returns).
 */
void pci_disable_device (struct pci_dev* dev) {
    log_trace("pci_disable_device\n");
    if (dev == NULL) {
        return;
    }
    /* Bug fix: test the handle BEFORE handing it to pci_bdf(); the
     * original called pci_bdf(dev->hdl) first and only then checked
     * dev->hdl for NULL. */
    if (dev->hdl == NULL) {
        log_err("pci device not attached\n");
        return;
    }
    if (pci_bdf(dev->hdl) == PCI_BDF_NONE) {
        log_err("pci device not enabled\n");
        return;
    }
    msix_uninit(dev);
    pci_device_detach(dev->hdl);
}
/**
 * pcim_enable_device - managed variant of pci_enable_device(); on success
 * the device is flagged is_managed so mappings are released automatically.
 *
 * \param pdev  device to enable.
 * \return 0 on success, -1 on failure.
 */
int __must_check pcim_enable_device (struct pci_dev *pdev) {
    log_trace("pcim_enable_device: %04x:%04x\n",
            pdev->vendor, pdev->device);
    pci_err_t r = pci_enable_device(pdev);
    if (r != PCI_ERR_OK) {
        /* Bug fix: the original discarded r and returned 0 even when the
         * enable failed, so callers honouring __must_check could never
         * detect the failure. */
        return -1;
    }
    pdev->is_managed = true;
    return 0;
}
/**
 * pci_iomap - map a device BAR into the caller's address space.
 *
 * MEM BARs are mapped with mmap_device_memory(); I/O BARs with
 * mmap_device_io(). The mapping is recorded in the ioblock registry as
 * unmanaged. Note 'max' is unused: the full BAR size is always mapped.
 *
 * \param dev  enabled device whose ba/addr tables were filled by
 *             pci_enable_device().
 * \param bar  BAR index.
 * \param max  ignored (Linux-compat parameter).
 * \return mapped address, or NULL on failure.
 */
void __iomem* pci_iomap (struct pci_dev* dev, int bar, unsigned long max) {
    /* Bug fix: validate the BAR index before dereferencing dev->ba[bar];
     * the original logged the error but carried on out of bounds. */
    if (bar < 0 || bar >= dev->nba) {
        log_err("internal error; bar: %d, nba: %d\n", bar, dev->nba);
        return NULL;
    }
    log_trace("pci_iomap; bar: %d, addr: %p, max: %p\n", bar,
            (void*)(unsigned long)dev->ba[bar].addr, (void*)max);
    /* mmap() the address space(s) */
    void __iomem* memptr = NULL;
    if (dev->ba[bar].type == pci_asType_e_MEM) {
        memptr = mmap_device_memory(
                0,
                dev->ba[bar].size,
                PROT_READ | PROT_WRITE | PROT_NOCACHE,
                0,
                dev->ba[bar].addr );
        if (memptr == MAP_FAILED) {
            log_err("pci device address mapping failed; %s\n", strerror(errno));
            return NULL;
        }
    }
    else if (dev->ba[bar].type == pci_asType_e_IO) {
        memptr = (void __iomem*)mmap_device_io(
                dev->ba[bar].size, dev->ba[bar].addr );
        if ((uintptr_t)memptr == MAP_DEVICE_FAILED) {
            log_err("pci device address mapping failed; %s\n", strerror(errno));
            return NULL;
        }
    }
    else {
        log_err("ioremap error: unknown ba type\n");
        return NULL;
    }
    log_dbg("ba[%d] mapping successful\n", bar);
    dev->addr[bar] = memptr;
    store_block(dev->addr[bar], dev->ba[bar].size, dev->ba[bar]);
    ioblock_t *block = get_block(dev->addr[bar]);
    /* Bug fix: the original dereferenced the get_block() result without
     * checking for NULL. */
    if (block == NULL) {
        log_err("pci_iomap; get_block error\n");
        return NULL;
    }
    block->is_managed = false;
    return dev->addr[bar];
}
/* Linux-compat accessor: first address of the given BAR. */
uintptr_t pci_resource_start (struct pci_dev* dev, int bar) {
    const uintptr_t start = dev->ba[bar].addr;
    log_trace("pci_resource_start; bar: %d, addr: %p\n", bar,
            (void*)(unsigned long)dev->ba[bar].addr);
    return start;
}
/* Linux-compat accessor: one-past-the-last address of the given BAR. */
uintptr_t pci_resource_end (struct pci_dev* dev, int bar) {
    const uintptr_t start = dev->ba[bar].addr;
    const uintptr_t len = dev->ba[bar].size;
    log_trace("pci_resource_end; bar: %d, addr: %p\n", bar,
            (void*)(unsigned long)dev->ba[bar].addr);
    return start + len;
}
/* Linux-compat accessor: size in bytes of the given BAR. */
uintptr_t pci_resource_len (struct pci_dev* dev, int bar) {
    log_trace("pci_resource_len; bar: %d, addr: %p\n", bar,
            (void*)(unsigned long)dev->ba[bar].addr);
    return dev->ba[bar].size;
}
/**
 * ioremap - Linux-compat mapping of a physical address range that falls
 * inside a previously registered BAR.
 *
 * Looks up the BAR covering 'offset' to decide between MEM and I/O
 * mapping, performs the mapping, and records it in the ioblock registry
 * as unmanaged.
 *
 * \param offset  physical start address (must lie within a stored BAR).
 * \param size    length of the mapping in bytes.
 * \return mapped address, or NULL on failure.
 */
void __iomem* ioremap (uintptr_t offset, size_t size) {
    log_trace("ioremap; offset: %p, size: %p\n",
            (void*)offset, (void*)size);
    /* mmap() the address space(s) */
    bar_t* bar = get_bar(offset);
    if (bar == NULL) {
        log_err("get_bar fail\n");
        return NULL;
    }
    void __iomem* memptr = NULL;
    if (bar->ba.type == pci_asType_e_MEM) {
        memptr = mmap_device_memory(
                0,
                size,
                PROT_READ | PROT_WRITE | PROT_NOCACHE,
                0,
                offset );
        if (memptr == MAP_FAILED) {
            log_err("pci device address mapping failed; %s\n", strerror(errno));
            return NULL;
        }
    }
    else if (bar->ba.type == pci_asType_e_IO) {
        memptr = (void __iomem*)mmap_device_io(size, offset);
        if ((uintptr_t)memptr == MAP_DEVICE_FAILED) {
            log_err("pci device address mapping failed; %s\n", strerror(errno));
            return NULL;
        }
    }
    else {
        log_err("ioremap error: unknown ba type\n");
        return NULL;
    }
    log_dbg("ioremap [%p] mapping to [%p] successful\n",
            (void*)offset, memptr);
    store_block(memptr, size, bar->ba);
    ioblock_t *block = get_block(memptr);
    /* Bug fix: the original dereferenced the get_block() result without
     * checking for NULL. */
    if (block == NULL) {
        log_err("ioremap; get_block fail\n");
        return NULL;
    }
    block->is_managed = false;
    return memptr;
}
/*
 * Unmap an address previously mapped via pci_iomap()/ioremap(): remove
 * its entry from the ioblock registry, release the mapping with the
 * call matching its type, and free the registry node.
 */
void pci_iounmap (struct pci_dev* dev, void __iomem* addr) {
    log_trace("pci_iounmap; addr: %p\n", addr);

    ioblock_t* block = remove_block(addr);
    if (block == NULL) {
        log_err("pci_iounmap; remove_block error\n");
        return;
    }
    log_trace("pci_iounmap; addr: %p, size: %p\n",
            block->addr, (void*)block->size);

    switch (block->ba.type) {
    case pci_asType_e_MEM:
        if (munmap_device_memory((void*)block->addr, block->size) == -1) {
            log_err("internal error; munmap_device_memory failure: %s\n",
                    strerror(errno));
        }
        break;
    case pci_asType_e_IO:
        if (munmap_device_io((uintptr_t)block->addr, block->size) == -1) {
            log_err("internal error; munmap_device_io failure: %s\n",
                    strerror(errno));
        }
        break;
    default:
        log_err("pci_iounmap error: unknown ba type\n");
        /* matches established behaviour: node not freed on unknown type */
        return;
    }
    free(block);
}
/*
 * Managed variant of pci_iomap(): on success, flag the tracked ioblock
 * as managed so it is released automatically with the device.
 */
void __iomem *pcim_iomap (struct pci_dev *pdev, int bar, unsigned long maxlen) {
    log_trace("pcim_iomap; bar: %d, addr: %p, max: %p\n", bar,
            (void*)(unsigned long)pdev->ba[bar].addr, (void*)maxlen);

    void __iomem *mapped = pci_iomap(pdev, bar, maxlen);
    if (mapped != NULL) {
        ioblock_t *tracked = get_block(pdev->addr[bar]);
        tracked->is_managed = true;
    }
    return mapped;
}
/* Linux-compat stub: bus mastering needs no explicit action here. */
void pci_set_master (struct pci_dev *dev) {
    log_trace("pci_set_master\n");
}
/*
 * Linux-compat stub: region ownership is already enforced by the
 * exclusive attach in pci_enable_device(), so always report success.
 */
int pci_request_regions (struct pci_dev* dev, const char* res_name) {
    log_trace("pci_request_regions\n");
    return 0;
}
/* Linux-compat stub: regions are released in pci_disable_device(). */
void pci_release_regions (struct pci_dev* dev) {
    log_trace("pci_release_regions\n");
}
/*
 * Linux-compat stub: vector allocation happens in pci_enable_device().
 * Reports one available vector when the device has an MSI capability,
 * zero otherwise; min_vecs/max_vecs/flags are ignored.
 */
int pci_alloc_irq_vectors (struct pci_dev *dev,
        unsigned int min_vecs, unsigned int max_vecs, unsigned int flags)
{
    log_trace("pci_alloc_irq_vectors\n");
    return dev->msi_cap ? 1 : 0;
}
/* Linux-compat stub: vectors are released in pci_disable_device(). */
void pci_free_irq_vectors (struct pci_dev *dev) {
    log_trace("pci_free_irq_vectors\n");
}
/**
* Important Note!
*
* Start offset of the device dependent portion of the 256/4096 byte PCI/PCIe
* configuration space from 0x40 to 0xFF/0xFFF.
*
* In Linux function pci_read_config_word() and pci_write_config_word() address
* "where" starts at 0x40.
*
* In QNX functions pci_device_cfg_rd*() and pci_device_cfg_wr*() have absolute
* address offset and it starts at 0x0. These functions will return an error if
* the offset is below 0x40.
*
* Thus to translate from Linux to QNX we must add 0x40 offset to address.
*/
#define PCI_CFG_SPACE_OFFSET 0x40
/*
 * Read an 8-bit value from the device-dependent configuration space.
 * 'where' is a Linux-style offset; the QNX call takes an absolute offset,
 * hence the PCI_CFG_SPACE_OFFSET translation (see Important Note above).
 * Returns 0 on success, -1 on failure.
 */
int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val) {
    const pci_bdf_t bdf = pci_bdf(dev->hdl);
    if (bdf == PCI_BDF_NONE) {
        return -1;
    }
    pci_err_t status = pci_device_cfg_rd8(bdf,
            where + PCI_CFG_SPACE_OFFSET, val);
    return (status == PCI_ERR_OK) ? 0 : -1;
}
/*
 * Read a 16-bit value from the device-dependent configuration space.
 * 'where' is a Linux-style offset; the QNX call takes an absolute offset,
 * hence the PCI_CFG_SPACE_OFFSET translation (see Important Note above).
 * Returns 0 on success, -1 on failure.
 */
int pci_read_config_word (const struct pci_dev* dev, int where, u16* val) {
    const pci_bdf_t bdf = pci_bdf(dev->hdl);
    if (bdf == PCI_BDF_NONE) {
        return -1;
    }
    pci_err_t status = pci_device_cfg_rd16(bdf,
            where + PCI_CFG_SPACE_OFFSET, val);
    return (status == PCI_ERR_OK) ? 0 : -1;
}
/*
 * Read a 32-bit value from the device-dependent configuration space.
 * 'where' is a Linux-style offset; the QNX call takes an absolute offset,
 * hence the PCI_CFG_SPACE_OFFSET translation (see Important Note above).
 * Returns 0 on success, -1 on failure.
 */
int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val) {
    const pci_bdf_t bdf = pci_bdf(dev->hdl);
    if (bdf == PCI_BDF_NONE) {
        return -1;
    }
    pci_err_t status = pci_device_cfg_rd32(bdf,
            where + PCI_CFG_SPACE_OFFSET, val);
    return (status == PCI_ERR_OK) ? 0 : -1;
}
/*
 * Write an 8-bit value to the device-dependent configuration space.
 * 'where' is a Linux-style offset translated by PCI_CFG_SPACE_OFFSET
 * (see Important Note above). Returns 0 on success, -1 on failure.
 */
int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val) {
    pci_err_t status = pci_device_cfg_wr8(dev->hdl,
            where + PCI_CFG_SPACE_OFFSET, val, NULL);
    return (status == PCI_ERR_OK) ? 0 : -1;
}
/*
 * Write a 16-bit value to the device-dependent configuration space.
 * 'where' is a Linux-style offset translated by PCI_CFG_SPACE_OFFSET
 * (see Important Note above). Returns 0 on success, -1 on failure.
 */
int pci_write_config_word (const struct pci_dev* dev, int where, u16 val) {
    pci_err_t status = pci_device_cfg_wr16(dev->hdl,
            where + PCI_CFG_SPACE_OFFSET, val, NULL);
    return (status == PCI_ERR_OK) ? 0 : -1;
}
/*
 * Write a 32-bit value to the device-dependent configuration space.
 * 'where' is a Linux-style offset translated by PCI_CFG_SPACE_OFFSET
 * (see Important Note above). Returns 0 on success, -1 on failure.
 */
int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val) {
    pci_err_t status = pci_device_cfg_wr32(dev->hdl,
            where + PCI_CFG_SPACE_OFFSET, val, NULL);
    return (status == PCI_ERR_OK) ? 0 : -1;
}