Commit eb9b347

drm/xe/vf: Move tile-related VF functions to separate file

Some of our VF functions, even though they take a GT pointer, operate only
on the primary GT and are really tile-related, so it is better to keep them
separate from the true GT-oriented functions. Move them to a separate file
and update them to take a tile pointer instead.

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Tomasz Lis <tomasz.lis@intel.com>
Reviewed-by: Tomasz Lis <tomasz.lis@intel.com>
Link: https://lore.kernel.org/r/20250602103325.549-3-michal.wajdeczko@intel.com
1 parent ce2ae1b commit eb9b347
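
The new xe_tile_sriov_vf.c/.h files are among the seven changed files, but their diffs are not shown in this excerpt. Based on the removed GT-scoped prototypes below and the updated call sites in xe_ggtt.c and xe_sriov_vf.c, the tile-scoped interface is expected to look roughly like the following sketch (inferred, not the verbatim new header; the two *_locked balloon declarations in particular are assumptions):

    /* Sketch of the expected tile-scoped interface, inferred from the
     * call sites and from the removed GT-scoped prototypes. */
    #ifndef _XE_TILE_SRIOV_VF_H_
    #define _XE_TILE_SRIOV_VF_H_

    #include <linux/types.h>

    struct xe_tile;

    int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile);
    int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile);
    void xe_tile_sriov_vf_deballoon_ggtt_locked(struct xe_tile *tile);
    void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift);

    #endif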

File tree: 7 files changed, +269 -253 lines

drivers/gpu/drm/xe/Makefile

Lines changed: 2 additions & 1 deletion
@@ -139,7 +139,8 @@ xe-y += \
         xe_guc_relay.o \
         xe_memirq.o \
         xe_sriov.o \
-        xe_sriov_vf.o
+        xe_sriov_vf.o \
+        xe_tile_sriov_vf.o
 
 xe-$(CONFIG_PCI_IOV) += \
         xe_gt_sriov_pf.o \

drivers/gpu/drm/xe/xe_ggtt.c

Lines changed: 2 additions & 2 deletions
@@ -22,12 +22,12 @@
 #include "xe_device.h"
 #include "xe_gt.h"
 #include "xe_gt_printk.h"
-#include "xe_gt_sriov_vf.h"
 #include "xe_gt_tlb_invalidation.h"
 #include "xe_map.h"
 #include "xe_mmio.h"
 #include "xe_pm.h"
 #include "xe_sriov.h"
+#include "xe_tile_sriov_vf.h"
 #include "xe_wa.h"
 #include "xe_wopcm.h"
 
@@ -258,7 +258,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
                 return err;
 
         if (IS_SRIOV_VF(xe)) {
-                err = xe_gt_sriov_vf_prepare_ggtt(xe_tile_get_gt(ggtt->tile, 0));
+                err = xe_tile_sriov_vf_prepare_ggtt(ggtt->tile);
                 if (err)
                         return err;
         }
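
With the tile-scoped call in place, the prepare helper itself presumably ends up in xe_tile_sriov_vf.c taking the tile directly, which also drops the media-GT early return seen in the deleted GT-based version. A minimal sketch, assuming the static helpers keep their old names but are converted to take a tile (and that the GGTT base/size config is still read from the primary GT's self_config inside them):

    /* Sketch only -- not the verbatim moved code. */
    int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile)
    {
            struct xe_device *xe = tile_to_xe(tile);
            int err;

            err = vf_init_ggtt_balloons(tile);      /* assumed tile-based helper */
            if (err)
                    return err;

            err = vf_balloon_ggtt(tile);            /* assumed tile-based helper */
            if (err) {
                    vf_fini_ggtt_balloons(tile);
                    return err;
            }

            /* same drm-managed cleanup pattern as the removed GT-based code */
            return drmm_add_action_or_reset(&xe->drm, cleanup_ggtt, tile);
    }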

drivers/gpu/drm/xe/xe_gt_sriov_vf.c

Lines changed: 0 additions & 245 deletions
@@ -613,168 +613,6 @@ s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt)
         return config->ggtt_shift;
 }
 
-static int vf_init_ggtt_balloons(struct xe_gt *gt)
-{
-        struct xe_tile *tile = gt_to_tile(gt);
-        struct xe_ggtt *ggtt = tile->mem.ggtt;
-
-        xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
-        xe_gt_assert(gt, !xe_gt_is_media_type(gt));
-
-        tile->sriov.vf.ggtt_balloon[0] = xe_ggtt_node_init(ggtt);
-        if (IS_ERR(tile->sriov.vf.ggtt_balloon[0]))
-                return PTR_ERR(tile->sriov.vf.ggtt_balloon[0]);
-
-        tile->sriov.vf.ggtt_balloon[1] = xe_ggtt_node_init(ggtt);
-        if (IS_ERR(tile->sriov.vf.ggtt_balloon[1])) {
-                xe_ggtt_node_fini(tile->sriov.vf.ggtt_balloon[0]);
-                return PTR_ERR(tile->sriov.vf.ggtt_balloon[1]);
-        }
-
-        return 0;
-}
-
-/**
- * xe_gt_sriov_vf_balloon_ggtt_locked - Insert balloon nodes to limit used GGTT address range.
- * @gt: the &xe_gt struct instance
- * Return: 0 on success or a negative error code on failure.
- */
-int xe_gt_sriov_vf_balloon_ggtt_locked(struct xe_gt *gt)
-{
-        struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
-        struct xe_tile *tile = gt_to_tile(gt);
-        struct xe_device *xe = gt_to_xe(gt);
-        u64 start, end;
-        int err;
-
-        xe_gt_assert(gt, IS_SRIOV_VF(xe));
-        xe_gt_assert(gt, !xe_gt_is_media_type(gt));
-        lockdep_assert_held(&tile->mem.ggtt->lock);
-
-        if (!config->ggtt_size)
-                return -ENODATA;
-
-        /*
-         * VF can only use part of the GGTT as allocated by the PF:
-         *
-         *     WOPCM                                  GUC_GGTT_TOP
-         *     |<------------ Total GGTT size ------------------>|
-         *
-         *          VF GGTT base -->|<- size ->|
-         *
-         *     +--------------------+----------+-----------------+
-         *     |////////////////////|  block   |\\\\\\\\\\\\\\\\\|
-         *     +--------------------+----------+-----------------+
-         *
-         *     |<--- balloon[0] --->|<-- VF -->|<-- balloon[1] ->|
-         */
-
-        start = xe_wopcm_size(xe);
-        end = config->ggtt_base;
-        if (end != start) {
-                err = xe_ggtt_node_insert_balloon_locked(tile->sriov.vf.ggtt_balloon[0],
-                                                         start, end);
-                if (err)
-                        return err;
-        }
-
-        start = config->ggtt_base + config->ggtt_size;
-        end = GUC_GGTT_TOP;
-        if (end != start) {
-                err = xe_ggtt_node_insert_balloon_locked(tile->sriov.vf.ggtt_balloon[1],
-                                                         start, end);
-                if (err) {
-                        xe_ggtt_node_remove_balloon_locked(tile->sriov.vf.ggtt_balloon[0]);
-                        return err;
-                }
-        }
-
-        return 0;
-}
-
-static int vf_balloon_ggtt(struct xe_gt *gt)
-{
-        struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
-        int err;
-
-        mutex_lock(&ggtt->lock);
-        err = xe_gt_sriov_vf_balloon_ggtt_locked(gt);
-        mutex_unlock(&ggtt->lock);
-
-        return err;
-}
-
-/**
- * xe_gt_sriov_vf_deballoon_ggtt_locked - Remove balloon nodes.
- * @gt: the &xe_gt struct instance
- */
-void xe_gt_sriov_vf_deballoon_ggtt_locked(struct xe_gt *gt)
-{
-        struct xe_tile *tile = gt_to_tile(gt);
-
-        xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
-        xe_ggtt_node_remove_balloon_locked(tile->sriov.vf.ggtt_balloon[1]);
-        xe_ggtt_node_remove_balloon_locked(tile->sriov.vf.ggtt_balloon[0]);
-}
-
-static void vf_deballoon_ggtt(struct xe_gt *gt)
-{
-        struct xe_tile *tile = gt_to_tile(gt);
-
-        mutex_lock(&tile->mem.ggtt->lock);
-        xe_gt_sriov_vf_deballoon_ggtt_locked(gt);
-        mutex_unlock(&tile->mem.ggtt->lock);
-}
-
-static void vf_fini_ggtt_balloons(struct xe_gt *gt)
-{
-        struct xe_tile *tile = gt_to_tile(gt);
-
-        xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
-        xe_gt_assert(gt, !xe_gt_is_media_type(gt));
-
-        xe_ggtt_node_fini(tile->sriov.vf.ggtt_balloon[1]);
-        xe_ggtt_node_fini(tile->sriov.vf.ggtt_balloon[0]);
-}
-
-static void cleanup_ggtt(struct drm_device *drm, void *arg)
-{
-        struct xe_gt *gt = arg;
-
-        vf_deballoon_ggtt(gt);
-        vf_fini_ggtt_balloons(gt);
-}
-
-/**
- * xe_gt_sriov_vf_prepare_ggtt - Prepare a VF's GGTT configuration.
- * @gt: the &xe_gt
- *
- * This function is for VF use only.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int xe_gt_sriov_vf_prepare_ggtt(struct xe_gt *gt)
-{
-        struct xe_tile *tile = gt_to_tile(gt);
-        struct xe_device *xe = tile_to_xe(tile);
-        int err;
-
-        if (xe_gt_is_media_type(gt))
-                return 0;
-
-        err = vf_init_ggtt_balloons(gt);
-        if (err)
-                return err;
-
-        err = vf_balloon_ggtt(gt);
-        if (err) {
-                vf_fini_ggtt_balloons(gt);
-                return err;
-        }
-
-        return drmm_add_action_or_reset(&xe->drm, cleanup_ggtt, gt);
-}
-
 static int relay_action_handshake(struct xe_gt *gt, u32 *major, u32 *minor)
 {
         u32 request[VF2PF_HANDSHAKE_REQUEST_MSG_LEN] = {
@@ -870,89 +708,6 @@ int xe_gt_sriov_vf_connect(struct xe_gt *gt)
         return err;
 }
 
-/**
- * DOC: GGTT nodes shifting during VF post-migration recovery
- *
- * The first fixup applied to the VF KMD structures as part of post-migration
- * recovery is shifting nodes within &xe_ggtt instance. The nodes are moved
- * from range previously assigned to this VF, into newly provisioned area.
- * The changes include balloons, which are resized accordingly.
- *
- * The balloon nodes are there to eliminate unavailable ranges from use: one
- * reserves the GGTT area below the range for current VF, and another one
- * reserves area above.
- *
- * Below is a GGTT layout of example VF, with a certain address range assigned to
- * said VF, and inaccessible areas above and below:
- *
- *  0                                                                        4GiB
- *  |<--------------------------- Total GGTT size ----------------------------->|
- *      WOPCM                                                         GUC_TOP
- *      |<-------------- Area mappable by xe_ggtt instance ---------------->|
- *
- *  +---+---------------------------------+----------+----------------------+---+
- *  |\\\|/////////////////////////////////|  VF mem  |//////////////////////|\\\|
- *  +---+---------------------------------+----------+----------------------+---+
- *
- * Hardware enforced access rules before migration:
- *
- *  |<------- inaccessible for VF ------->|<VF owned>|<-- inaccessible for VF ->|
- *
- * GGTT nodes used for tracking allocations:
- *
- *     |<---------- balloon ------------>|<- nodes->|<----- balloon ------>|
- *
- * After the migration, GGTT area assigned to the VF might have shifted, either
- * to lower or to higher address. But we expect the total size and extra areas to
- * be identical, as migration can only happen between matching platforms.
- * Below is an example of GGTT layout of the VF after migration. Content of the
- * GGTT for VF has been moved to a new area, and we receive its address from GuC:
- *
- *  +---+----------------------+----------+---------------------------------+---+
- *  |\\\|//////////////////////|  VF mem  |/////////////////////////////////|\\\|
- *  +---+----------------------+----------+---------------------------------+---+
- *
- * Hardware enforced access rules after migration:
- *
- *  |<- inaccessible for VF -->|<VF owned>|<------- inaccessible for VF ------->|
- *
- * So the VF has a new slice of GGTT assigned, and during migration process, the
- * memory content was copied to that new area. But the &xe_ggtt nodes are still
- * tracking allocations using the old addresses. The nodes within VF owned area
- * have to be shifted, and balloon nodes need to be resized to properly mask out
- * areas not owned by the VF.
- *
- * Fixed &xe_ggtt nodes used for tracking allocations:
- *
- *     |<------ balloon ------>|<- nodes->|<----------- balloon ----------->|
- *
- * Due to use of GPU profiles, we do not expect the old and new GGTT ares to
- * overlap; but our node shifting will fix addresses properly regardless.
- */
-
-/**
- * xe_gt_sriov_vf_fixup_ggtt_nodes - Shift GGTT allocations to match assigned range.
- * @gt: the &xe_gt struct instance
- * @shift: the shift value
- *
- * Since Global GTT is not virtualized, each VF has an assigned range
- * within the global space. This range might have changed during migration,
- * which requires all memory addresses pointing to GGTT to be shifted.
- */
-void xe_gt_sriov_vf_fixup_ggtt_nodes(struct xe_gt *gt, s64 shift)
-{
-        struct xe_tile *tile = gt_to_tile(gt);
-        struct xe_ggtt *ggtt = tile->mem.ggtt;
-
-        xe_gt_assert(gt, !xe_gt_is_media_type(gt));
-
-        mutex_lock(&ggtt->lock);
-        xe_gt_sriov_vf_deballoon_ggtt_locked(gt);
-        xe_ggtt_shift_nodes_locked(ggtt, shift);
-        xe_gt_sriov_vf_balloon_ggtt_locked(gt);
-        mutex_unlock(&ggtt->lock);
-}
-
 /**
  * xe_gt_sriov_vf_migrated_event_handler - Start a VF migration recovery,
  * or just mark that a GuC is ready for it.
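
For comparison with the deleted xe_gt_sriov_vf_fixup_ggtt_nodes() above, the tile-based replacement should be a mostly mechanical conversion: the gt_to_tile() hop and the media-GT assert disappear once the function is handed a tile. A sketch under that assumption (the *_locked balloon helpers are the assumed tile-scoped counterparts from the inferred header above):

    /* Sketch only -- not the verbatim moved code. */
    void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift)
    {
            struct xe_ggtt *ggtt = tile->mem.ggtt;

            mutex_lock(&ggtt->lock);
            xe_tile_sriov_vf_deballoon_ggtt_locked(tile);
            xe_ggtt_shift_nodes_locked(ggtt, shift);
            xe_tile_sriov_vf_balloon_ggtt_locked(tile);
            mutex_unlock(&ggtt->lock);
    }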

drivers/gpu/drm/xe/xe_gt_sriov_vf.h

Lines changed: 0 additions & 4 deletions
@@ -17,10 +17,6 @@ int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt);
 int xe_gt_sriov_vf_query_config(struct xe_gt *gt);
 int xe_gt_sriov_vf_connect(struct xe_gt *gt);
 int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt);
-int xe_gt_sriov_vf_prepare_ggtt(struct xe_gt *gt);
-int xe_gt_sriov_vf_balloon_ggtt_locked(struct xe_gt *gt);
-void xe_gt_sriov_vf_deballoon_ggtt_locked(struct xe_gt *gt);
-void xe_gt_sriov_vf_fixup_ggtt_nodes(struct xe_gt *gt, s64 shift);
 int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt);
 void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt);

drivers/gpu/drm/xe/xe_sriov_vf.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
#include "xe_sriov.h"
1616
#include "xe_sriov_printk.h"
1717
#include "xe_sriov_vf.h"
18+
#include "xe_tile_sriov_vf.h"
1819

1920
/**
2021
* DOC: VF restore procedure in PF KMD and VF KMD
@@ -211,7 +212,7 @@ static bool vf_post_migration_fixup_ggtt_nodes(struct xe_device *xe)
211212
shift = xe_gt_sriov_vf_ggtt_shift(gt);
212213
if (shift) {
213214
need_fixups = true;
214-
xe_gt_sriov_vf_fixup_ggtt_nodes(gt, shift);
215+
xe_tile_sriov_vf_fixup_ggtt_nodes(tile, shift);
215216
}
216217
}
217218
return need_fixups;
