@@ -1491,3 +1491,189 @@ amdgpu_gmc_query_memory_partition(struct amdgpu_device *adev)
14911491 else
14921492 return amdgpu_gmc_get_memory_partition (adev , NULL );
14931493}
1494+
1495+ static bool amdgpu_gmc_validate_partition_info (struct amdgpu_device * adev )
1496+ {
1497+ enum amdgpu_memory_partition mode ;
1498+ u32 supp_modes ;
1499+ bool valid ;
1500+
1501+ mode = amdgpu_gmc_get_memory_partition (adev , & supp_modes );
1502+
1503+ /* Mode detected by hardware not present in supported modes */
1504+ if ((mode != UNKNOWN_MEMORY_PARTITION_MODE ) &&
1505+ !(BIT (mode - 1 ) & supp_modes ))
1506+ return false;
1507+
1508+ switch (mode ) {
1509+ case UNKNOWN_MEMORY_PARTITION_MODE :
1510+ case AMDGPU_NPS1_PARTITION_MODE :
1511+ valid = (adev -> gmc .num_mem_partitions == 1 );
1512+ break ;
1513+ case AMDGPU_NPS2_PARTITION_MODE :
1514+ valid = (adev -> gmc .num_mem_partitions == 2 );
1515+ break ;
1516+ case AMDGPU_NPS4_PARTITION_MODE :
1517+ valid = (adev -> gmc .num_mem_partitions == 3 ||
1518+ adev -> gmc .num_mem_partitions == 4 );
1519+ break ;
1520+ case AMDGPU_NPS8_PARTITION_MODE :
1521+ valid = (adev -> gmc .num_mem_partitions == 8 );
1522+ break ;
1523+ default :
1524+ valid = false;
1525+ }
1526+
1527+ return valid ;
1528+ }
1529+
/*
 * amdgpu_gmc_is_node_present - check whether a NUMA node id was already seen
 * @node_ids: array of node ids collected so far
 * @num_ids: number of valid entries in @node_ids
 * @nid: node id to look for
 *
 * Return: true if @nid occurs within the first @num_ids entries of @node_ids.
 */
static bool amdgpu_gmc_is_node_present(int *node_ids, int num_ids, int nid)
{
	int idx = 0;

	while (idx < num_ids) {
		if (node_ids[idx] == nid)
			return true;
		idx++;
	}

	return false;
}
1541+
1542+ static void
1543+ amdgpu_gmc_init_acpi_mem_ranges (struct amdgpu_device * adev ,
1544+ struct amdgpu_mem_partition_info * mem_ranges )
1545+ {
1546+ struct amdgpu_numa_info numa_info ;
1547+ int node_ids [AMDGPU_MAX_MEM_RANGES ];
1548+ int num_ranges = 0 , ret ;
1549+ int num_xcc , xcc_id ;
1550+ uint32_t xcc_mask ;
1551+
1552+ num_xcc = NUM_XCC (adev -> gfx .xcc_mask );
1553+ xcc_mask = (1U << num_xcc ) - 1 ;
1554+
1555+ for_each_inst (xcc_id , xcc_mask ) {
1556+ ret = amdgpu_acpi_get_mem_info (adev , xcc_id , & numa_info );
1557+ if (ret )
1558+ continue ;
1559+
1560+ if (numa_info .nid == NUMA_NO_NODE ) {
1561+ mem_ranges [0 ].size = numa_info .size ;
1562+ mem_ranges [0 ].numa .node = numa_info .nid ;
1563+ num_ranges = 1 ;
1564+ break ;
1565+ }
1566+
1567+ if (amdgpu_gmc_is_node_present (node_ids , num_ranges ,
1568+ numa_info .nid ))
1569+ continue ;
1570+
1571+ node_ids [num_ranges ] = numa_info .nid ;
1572+ mem_ranges [num_ranges ].numa .node = numa_info .nid ;
1573+ mem_ranges [num_ranges ].size = numa_info .size ;
1574+ ++ num_ranges ;
1575+ }
1576+
1577+ adev -> gmc .num_mem_partitions = num_ranges ;
1578+ }
1579+
1580+ void amdgpu_gmc_init_sw_mem_ranges (struct amdgpu_device * adev ,
1581+ struct amdgpu_mem_partition_info * mem_ranges )
1582+ {
1583+ enum amdgpu_memory_partition mode ;
1584+ u32 start_addr = 0 , size ;
1585+ int i , r , l ;
1586+
1587+ mode = amdgpu_gmc_query_memory_partition (adev );
1588+
1589+ switch (mode ) {
1590+ case UNKNOWN_MEMORY_PARTITION_MODE :
1591+ adev -> gmc .num_mem_partitions = 0 ;
1592+ break ;
1593+ case AMDGPU_NPS1_PARTITION_MODE :
1594+ adev -> gmc .num_mem_partitions = 1 ;
1595+ break ;
1596+ case AMDGPU_NPS2_PARTITION_MODE :
1597+ adev -> gmc .num_mem_partitions = 2 ;
1598+ break ;
1599+ case AMDGPU_NPS4_PARTITION_MODE :
1600+ if (adev -> flags & AMD_IS_APU )
1601+ adev -> gmc .num_mem_partitions = 3 ;
1602+ else
1603+ adev -> gmc .num_mem_partitions = 4 ;
1604+ break ;
1605+ case AMDGPU_NPS8_PARTITION_MODE :
1606+ adev -> gmc .num_mem_partitions = 8 ;
1607+ break ;
1608+ default :
1609+ adev -> gmc .num_mem_partitions = 1 ;
1610+ break ;
1611+ }
1612+
1613+ /* Use NPS range info, if populated */
1614+ r = amdgpu_gmc_get_nps_memranges (adev , mem_ranges ,
1615+ & adev -> gmc .num_mem_partitions );
1616+ if (!r ) {
1617+ l = 0 ;
1618+ for (i = 1 ; i < adev -> gmc .num_mem_partitions ; ++ i ) {
1619+ if (mem_ranges [i ].range .lpfn >
1620+ mem_ranges [i - 1 ].range .lpfn )
1621+ l = i ;
1622+ }
1623+
1624+ } else {
1625+ if (!adev -> gmc .num_mem_partitions ) {
1626+ dev_warn (adev -> dev ,
1627+ "Not able to detect NPS mode, fall back to NPS1\n" );
1628+ adev -> gmc .num_mem_partitions = 1 ;
1629+ }
1630+ /* Fallback to sw based calculation */
1631+ size = (adev -> gmc .real_vram_size + SZ_16M ) >> AMDGPU_GPU_PAGE_SHIFT ;
1632+ size /= adev -> gmc .num_mem_partitions ;
1633+
1634+ for (i = 0 ; i < adev -> gmc .num_mem_partitions ; ++ i ) {
1635+ mem_ranges [i ].range .fpfn = start_addr ;
1636+ mem_ranges [i ].size =
1637+ ((u64 )size << AMDGPU_GPU_PAGE_SHIFT );
1638+ mem_ranges [i ].range .lpfn = start_addr + size - 1 ;
1639+ start_addr += size ;
1640+ }
1641+
1642+ l = adev -> gmc .num_mem_partitions - 1 ;
1643+ }
1644+
1645+ /* Adjust the last one */
1646+ mem_ranges [l ].range .lpfn =
1647+ (adev -> gmc .real_vram_size >> AMDGPU_GPU_PAGE_SHIFT ) - 1 ;
1648+ mem_ranges [l ].size =
1649+ adev -> gmc .real_vram_size -
1650+ ((u64 )mem_ranges [l ].range .fpfn << AMDGPU_GPU_PAGE_SHIFT );
1651+ }
1652+
1653+ int amdgpu_gmc_init_mem_ranges (struct amdgpu_device * adev )
1654+ {
1655+ bool valid ;
1656+
1657+ adev -> gmc .mem_partitions = kcalloc (AMDGPU_MAX_MEM_RANGES ,
1658+ sizeof (struct amdgpu_mem_partition_info ),
1659+ GFP_KERNEL );
1660+ if (!adev -> gmc .mem_partitions )
1661+ return - ENOMEM ;
1662+
1663+ if (adev -> gmc .is_app_apu )
1664+ amdgpu_gmc_init_acpi_mem_ranges (adev , adev -> gmc .mem_partitions );
1665+ else
1666+ amdgpu_gmc_init_sw_mem_ranges (adev , adev -> gmc .mem_partitions );
1667+
1668+ if (amdgpu_sriov_vf (adev ))
1669+ valid = true;
1670+ else
1671+ valid = amdgpu_gmc_validate_partition_info (adev );
1672+ if (!valid ) {
1673+ /* TODO: handle invalid case */
1674+ dev_warn (adev -> dev ,
1675+ "Mem ranges not matching with hardware config\n" );
1676+ }
1677+
1678+ return 0 ;
1679+ }
0 commit comments