289 changes: 242 additions & 47 deletions drivers/pcie/host/controller.c
@@ -66,18 +66,14 @@ void pcie_generic_ctrl_conf_write(mm_reg_t cfg_addr, pcie_bdf_t bdf,
bdf_cfg_mem[reg] = data;
}

static void pcie_generic_ctrl_enumerate_type1(const struct device *ctrl_dev, pcie_bdf_t bdf)
{
/* Not yet supported */
}

static void pcie_generic_ctrl_type0_enumerate_bars(const struct device *ctrl_dev, pcie_bdf_t bdf)
static void pcie_generic_ctrl_enumerate_bars(const struct device *ctrl_dev, pcie_bdf_t bdf,
unsigned int nbars)
{
unsigned int bar, reg, data;
uintptr_t scratch, bar_bus_addr;
size_t size, bar_size;

for (bar = 0, reg = PCIE_CONF_BAR0; reg <= PCIE_CONF_BAR5; reg ++, bar++) {
for (bar = 0, reg = PCIE_CONF_BAR0; bar < nbars && reg <= PCIE_CONF_BAR5; reg ++, bar++) {
bool found_mem64 = false;
bool found_mem = false;

@@ -160,58 +156,257 @@ static void pcie_generic_ctrl_type0_enumerate_bars(const struct device *ctrl_dev
}
}

static bool pcie_generic_ctrl_enumerate_type1(const struct device *ctrl_dev, pcie_bdf_t bdf,
unsigned int bus_number)
{
uint32_t class = pcie_conf_read(bdf, PCIE_CONF_CLASSREV);

/* Handle only PCI-to-PCI bridge for now */
if (PCIE_CONF_CLASSREV_CLASS(class) == 0x06 &&
PCIE_CONF_CLASSREV_SUBCLASS(class) == 0x04) {
uint32_t number = pcie_conf_read(bdf, PCIE_BUS_NUMBER);
uintptr_t bar_base_addr;

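/* A Type 1 (bridge) header only implements BAR0 and BAR1, hence nbars = 2 */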
pcie_generic_ctrl_enumerate_bars(ctrl_dev, bdf, 2);

/* Configure bus number registers */
pcie_conf_write(bdf, PCIE_BUS_NUMBER,
PCIE_BUS_NUMBER_VAL(PCIE_BDF_TO_BUS(bdf),
bus_number,
0xff, /* set max until we have finished scanning */
PCIE_SECONDARY_LATENCY_TIMER(number)));

/* I/O align on 4k boundary */
if (pcie_ctrl_region_get_allocate_base(ctrl_dev, bdf, false, false,
KB(4), &bar_base_addr)) {
uint32_t io = pcie_conf_read(bdf, PCIE_IO_SEC_STATUS);
uint32_t io_upper = pcie_conf_read(bdf, PCIE_IO_BASE_LIMIT_UPPER);

pcie_conf_write(bdf, PCIE_IO_SEC_STATUS,
PCIE_IO_SEC_STATUS_VAL(PCIE_IO_BASE(io),
PCIE_IO_LIMIT(io),
PCIE_SEC_STATUS(io)));

pcie_conf_write(bdf, PCIE_IO_BASE_LIMIT_UPPER,
PCIE_IO_BASE_LIMIT_UPPER_VAL(PCIE_IO_BASE_UPPER(io_upper),
PCIE_IO_LIMIT_UPPER(io_upper)));

pcie_set_cmd(bdf, PCIE_CONF_CMDSTAT_IO, true);
}

/* MEM align on 1MiB boundary */
if (pcie_ctrl_region_get_allocate_base(ctrl_dev, bdf, true, false,
MB(1), &bar_base_addr)) {
uint32_t mem = pcie_conf_read(bdf, PCIE_MEM_BASE_LIMIT);

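/* Program address bits 31:20 of the window base into bits 15:4 of the Memory Base register */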
pcie_conf_write(bdf, PCIE_MEM_BASE_LIMIT,
PCIE_MEM_BASE_LIMIT_VAL((bar_base_addr & 0xfff00000) >> 16,
PCIE_MEM_LIMIT(mem)));

pcie_set_cmd(bdf, PCIE_CONF_CMDSTAT_MEM, true);
}

/* TODO: add support for prefetchable */

pcie_set_cmd(bdf, PCIE_CONF_CMDSTAT_MASTER, true);

return true;
}

return false;
}

static void pcie_generic_ctrl_post_enumerate_type1(const struct device *ctrl_dev, pcie_bdf_t bdf,
unsigned int bus_number)
{
uint32_t number = pcie_conf_read(bdf, PCIE_BUS_NUMBER);
uintptr_t bar_base_addr;

/* Configure bus subordinate */
pcie_conf_write(bdf, PCIE_BUS_NUMBER,
PCIE_BUS_NUMBER_VAL(PCIE_BUS_PRIMARY_NUMBER(number),
PCIE_BUS_SECONDARY_NUMBER(number),
bus_number - 1,
PCIE_SECONDARY_LATENCY_TIMER(number)));

/* I/O align on 4k boundary */
if (pcie_ctrl_region_get_allocate_base(ctrl_dev, bdf, false, false,
KB(4), &bar_base_addr)) {
uint32_t io = pcie_conf_read(bdf, PCIE_IO_SEC_STATUS);
uint32_t io_upper = pcie_conf_read(bdf, PCIE_IO_BASE_LIMIT_UPPER);

pcie_conf_write(bdf, PCIE_IO_SEC_STATUS,
PCIE_IO_SEC_STATUS_VAL(PCIE_IO_BASE(io),
((bar_base_addr - 1) & 0x0000f000) >> 16,
PCIE_SEC_STATUS(io)));

pcie_conf_write(bdf, PCIE_IO_BASE_LIMIT_UPPER,
PCIE_IO_BASE_LIMIT_UPPER_VAL(PCIE_IO_BASE_UPPER(io_upper),
((bar_base_addr - 1) & 0xffff0000) >> 16));
}

/* MEM align on 1MiB boundary */
if (pcie_ctrl_region_get_allocate_base(ctrl_dev, bdf, true, false,
MB(1), &bar_base_addr)) {
uint32_t mem = pcie_conf_read(bdf, PCIE_MEM_BASE_LIMIT);

pcie_conf_write(bdf, PCIE_MEM_BASE_LIMIT,
PCIE_MEM_BASE_LIMIT_VAL(PCIE_MEM_BASE(mem),
(bar_base_addr - 1) >> 16));
}

/* TODO: add support for prefetchable */
}

static void pcie_generic_ctrl_enumerate_type0(const struct device *ctrl_dev, pcie_bdf_t bdf)
{
/* Setup Type0 BARs */
pcie_generic_ctrl_type0_enumerate_bars(ctrl_dev, bdf);
pcie_generic_ctrl_enumerate_bars(ctrl_dev, bdf, 6);
}

void pcie_generic_ctrl_enumerate(const struct device *ctrl_dev, pcie_bdf_t bdf_start)
static bool pcie_generic_ctrl_enumerate_endpoint(const struct device *ctrl_dev,
pcie_bdf_t bdf, unsigned int bus_number,
bool *skip_next_func)
{
bool multifunction_device = false;
bool layout_type_1 = false;
uint32_t data, class, id;
unsigned int dev = PCIE_BDF_TO_DEV(bdf_start),
func = 0,
bus = PCIE_BDF_TO_BUS(bdf_start);

for (; dev <= PCIE_MAX_DEV; dev++) {
func = 0;
for (; func <= PCIE_MAX_FUNC; func++) {
pcie_bdf_t bdf = PCIE_BDF(bus, dev, func);
bool multifunction_device = false;
bool layout_type_1 = false;

id = pcie_conf_read(bdf, PCIE_CONF_ID);
if (id == PCIE_ID_NONE) {
continue;
}
bool is_bridge = false;

class = pcie_conf_read(bdf, PCIE_CONF_CLASSREV);
data = pcie_conf_read(bdf, PCIE_CONF_TYPE);
*skip_next_func = false;

multifunction_device = PCIE_CONF_MULTIFUNCTION(data);
layout_type_1 = PCIE_CONF_TYPE_BRIDGE(data);
id = pcie_conf_read(bdf, PCIE_CONF_ID);
if (id == PCIE_ID_NONE) {
return false;
}

LOG_INF("[%02x:%02x.%x] %04x:%04x class %x subclass %x progif %x "
"rev %x Type%x multifunction %s",
PCIE_BDF_TO_BUS(bdf), PCIE_BDF_TO_DEV(bdf), PCIE_BDF_TO_FUNC(bdf),
id & 0xffff, id >> 16,
PCIE_CONF_CLASSREV_CLASS(class),
PCIE_CONF_CLASSREV_SUBCLASS(class),
PCIE_CONF_CLASSREV_PROGIF(class),
PCIE_CONF_CLASSREV_REV(class),
layout_type_1 ? 1 : 0,
multifunction_device ? "true" : "false");

if (layout_type_1) {
pcie_generic_ctrl_enumerate_type1(ctrl_dev, bdf);
} else {
pcie_generic_ctrl_enumerate_type0(ctrl_dev, bdf);
}
class = pcie_conf_read(bdf, PCIE_CONF_CLASSREV);
data = pcie_conf_read(bdf, PCIE_CONF_TYPE);

multifunction_device = PCIE_CONF_MULTIFUNCTION(data);
layout_type_1 = PCIE_CONF_TYPE_BRIDGE(data);

LOG_INF("[%02x:%02x.%x] %04x:%04x class %x subclass %x progif %x "
"rev %x Type%x multifunction %s",
PCIE_BDF_TO_BUS(bdf), PCIE_BDF_TO_DEV(bdf), PCIE_BDF_TO_FUNC(bdf),
id & 0xffff, id >> 16,
PCIE_CONF_CLASSREV_CLASS(class),
PCIE_CONF_CLASSREV_SUBCLASS(class),
PCIE_CONF_CLASSREV_PROGIF(class),
PCIE_CONF_CLASSREV_REV(class),
layout_type_1 ? 1 : 0,
multifunction_device ? "true" : "false");

/* Do not enumerate sub-functions if not a multifunction device */
if (PCIE_BDF_TO_FUNC(bdf) == 0 && !multifunction_device) {
*skip_next_func = true;
}

if (layout_type_1) {
is_bridge = pcie_generic_ctrl_enumerate_type1(ctrl_dev, bdf, bus_number);
} else {
pcie_generic_ctrl_enumerate_type0(ctrl_dev, bdf);
}

return is_bridge;
}

/* Return the next BDF or PCIE_BDF_NONE without changing bus number */
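/* e.g. 00:03.7 advances to 00:04.0, and 00:1f.7 yields PCIE_BDF_NONE */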
static inline unsigned int pcie_bdf_bus_next(unsigned int bdf, bool skip_next_func)
{
if (skip_next_func) {
if (PCIE_BDF_TO_DEV(bdf) == PCIE_BDF_DEV_MASK) {
return PCIE_BDF_NONE;
}

return PCIE_BDF(PCIE_BDF_TO_BUS(bdf), PCIE_BDF_TO_DEV(bdf) + 1, 0);
}

if (PCIE_BDF_TO_DEV(bdf) == PCIE_BDF_DEV_MASK &&
PCIE_BDF_TO_FUNC(bdf) == PCIE_BDF_FUNC_MASK) {
return PCIE_BDF_NONE;
}

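/* Advance to the next function; the quotient carries into the device number when the function number wraps past PCIE_BDF_FUNC_MASK */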
return PCIE_BDF(PCIE_BDF_TO_BUS(bdf),
(PCIE_BDF_TO_DEV(bdf) +
((PCIE_BDF_TO_FUNC(bdf) + 1) / (PCIE_BDF_FUNC_MASK + 1))),
((PCIE_BDF_TO_FUNC(bdf) + 1) & PCIE_BDF_FUNC_MASK));
}

/* Do not enumerate sub-functions if not a multifunction device */
if (PCIE_BDF_TO_FUNC(bdf) == 0 && !multifunction_device) {
break;
struct pcie_bus_state {
/* Current scanned bus BDF, always valid */
unsigned int bus_bdf;
/* Current bridge endpoint BDF, either valid or PCIE_BDF_NONE */
unsigned int bridge_bdf;
/* Next BDF to scan on bus, either valid or PCIE_BDF_NONE when all EPs have been scanned */
unsigned int next_bdf;
};

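/* Maximum number of nested buses the explicit traversal stack can track */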
#define MAX_TRAVERSE_STACK 256

/* Non-recursive stack based PCIe bus & bridge enumeration */
void pcie_generic_ctrl_enumerate(const struct device *ctrl_dev, pcie_bdf_t bdf_start)
{
struct pcie_bus_state stack[MAX_TRAVERSE_STACK], *state;
unsigned int bus_number = PCIE_BDF_TO_BUS(bdf_start) + 1;
bool skip_next_func = false;
bool is_bridge = false;

int stack_top = 0;

/* Start with first endpoint of immediate Root Controller bus */
stack[stack_top].bus_bdf = PCIE_BDF(PCIE_BDF_TO_BUS(bdf_start), 0, 0);
stack[stack_top].bridge_bdf = PCIE_BDF_NONE;
stack[stack_top].next_bdf = bdf_start;

while (stack_top >= 0) {
/* Top of stack contains the current PCIe bus to traverse */
state = &stack[stack_top];

/* Finish current bridge configuration before scanning other endpoints */
if (state->bridge_bdf != PCIE_BDF_NONE) {
pcie_generic_ctrl_post_enumerate_type1(ctrl_dev, state->bridge_bdf,
bus_number);

state->bridge_bdf = PCIE_BDF_NONE;
}

/* We still have more endpoints to scan */
if (state->next_bdf != PCIE_BDF_NONE) {
while (state->next_bdf != PCIE_BDF_NONE) {
is_bridge = pcie_generic_ctrl_enumerate_endpoint(ctrl_dev,
state->next_bdf,
bus_number,
&skip_next_func);
if (is_bridge) {
state->bridge_bdf = state->next_bdf;
state->next_bdf = pcie_bdf_bus_next(state->next_bdf,
skip_next_func);

/* If we can't handle more bridges, don't go further */
if (stack_top == (MAX_TRAVERSE_STACK - 1) ||
bus_number == PCIE_BDF_BUS_MASK) {
break;
}

/* Push to stack to scan this bus */
stack_top++;
stack[stack_top].bus_bdf = PCIE_BDF(bus_number, 0, 0);
stack[stack_top].bridge_bdf = PCIE_BDF_NONE;
stack[stack_top].next_bdf = PCIE_BDF(bus_number, 0, 0);

/* Increase bus number */
bus_number++;

break;
}

state->next_bdf = pcie_bdf_bus_next(state->next_bdf,
skip_next_func);
}
} else {
/* We finished scanning this bus, go back and scan the remaining endpoints */
stack_top--;
}
}
}
41 changes: 41 additions & 0 deletions drivers/pcie/host/pcie_ecam.c
@@ -240,6 +240,46 @@ static bool pcie_ecam_region_allocate(const struct device *dev, pcie_bdf_t bdf,
return pcie_ecam_region_allocate_type(data, bdf, bar_size, bar_bus_addr, type);
}

static bool pcie_ecam_region_get_allocate_base(const struct device *dev, pcie_bdf_t bdf,
bool mem, bool mem64, size_t align,
uintptr_t *bar_base_addr)
{
struct pcie_ecam_data *data = (struct pcie_ecam_data *)dev->data;
enum pcie_region_type type;

if (mem && !data->regions[PCIE_REGION_MEM64].size &&
!data->regions[PCIE_REGION_MEM].size) {
LOG_DBG("bdf %x no mem region defined for allocation", bdf);
return false;
}

if (!mem && !data->regions[PCIE_REGION_IO].size) {
LOG_DBG("bdf %x no io region defined for allocation", bdf);
return false;
}

/*
* Allocate from the mem64 region when 64-bit memory is requested and the
* region exists, or when mem64 is the only memory region available
*
* TOFIX:
* - handle allocation from/to mem/mem64 when a region is full
*/
if (mem && ((mem64 && data->regions[PCIE_REGION_MEM64].size) ||
(data->regions[PCIE_REGION_MEM64].size &&
!data->regions[PCIE_REGION_MEM].size))) {
type = PCIE_REGION_MEM64;
} else if (mem) {
type = PCIE_REGION_MEM;
} else {
type = PCIE_REGION_IO;
}

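/* Round the next free bus address up to the requested alignment; align is expected to be a power of two, so ((x - 1) | (align - 1)) + 1 rounds x up to the next multiple */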
*bar_base_addr = (((data->regions[type].bus_start +
data->regions[type].allocation_offset) - 1) | ((align) - 1)) + 1;

return true;
}

static bool pcie_ecam_region_translate(const struct device *dev, pcie_bdf_t bdf,
bool mem, bool mem64, uintptr_t bar_bus_addr,
uintptr_t *bar_addr)
@@ -270,6 +310,7 @@ static const struct pcie_ctrl_driver_api pcie_ecam_api = {
.conf_read = pcie_ecam_ctrl_conf_read,
.conf_write = pcie_ecam_ctrl_conf_write,
.region_allocate = pcie_ecam_region_allocate,
.region_get_allocate_base = pcie_ecam_region_get_allocate_base,
.region_translate = pcie_ecam_region_translate,
};
