Skip to content
This repository has been archived by the owner on Aug 5, 2022. It is now read-only.

Commit

Permalink
2016q2 update based on drm-intel kernel 4.3.0
Browse files Browse the repository at this point in the history
Signed-off-by: libo zhu <libo.zhu@intel.com>
  • Loading branch information
vmmqa committed Jul 18, 2016
1 parent 6458387 commit 83df1d4
Show file tree
Hide file tree
Showing 26 changed files with 623 additions and 219 deletions.
1 change: 1 addition & 0 deletions arch/x86/kvm/mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -2518,6 +2518,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,

#ifdef CONFIG_KVMGT
if (kvmgt_gfn_is_write_protected(vcpu->kvm, gfn)) {
spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
ret = 1;
goto set_pte;
}
Expand Down
2 changes: 1 addition & 1 deletion drivers/gpu/drm/i915/i915_gem_vgtbuffer.c
Original file line number Diff line number Diff line change
Expand Up @@ -185,7 +185,7 @@ static int vgt_decode_information(struct drm_device *dev,
return -EINVAL;
}

args->size = (((args->width * args->height * args->bpp) / 8) +
args->size = (((args->stride * args->height * args->bpp) / 8) +
(PAGE_SIZE - 1)) >> PAGE_SHIFT;

if (args->start & (PAGE_SIZE - 1)) {
Expand Down
88 changes: 62 additions & 26 deletions drivers/gpu/drm/i915/intel_pm.c
Original file line number Diff line number Diff line change
Expand Up @@ -4219,38 +4219,74 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
return;

/* Note the units here are not exactly 1us, but 1280ns. */
switch (new_power) {
case LOW_POWER:
/* Upclock if more than 95% busy over 16ms */
ei_up = 16000;
threshold_up = 95;
if (!i915_host_mediate) {
switch (new_power) {
case LOW_POWER:
/* Upclock if more than 95% busy over 16ms */
ei_up = 16000;
threshold_up = 95;

/* Downclock if less than 85% busy over 32ms */
ei_down = 32000;
threshold_down = 85;
break;

/* Downclock if less than 85% busy over 32ms */
ei_down = 32000;
threshold_down = 85;
break;
case BETWEEN:
/* Upclock if more than 90% busy over 13ms */
ei_up = 13000;
threshold_up = 90;

case BETWEEN:
/* Upclock if more than 90% busy over 13ms */
ei_up = 13000;
threshold_up = 90;
/* Downclock if less than 75% busy over 32ms */
ei_down = 32000;
threshold_down = 75;
break;

/* Downclock if less than 75% busy over 32ms */
ei_down = 32000;
threshold_down = 75;
break;
case HIGH_POWER:
/* Upclock if more than 85% busy over 10ms */
ei_up = 10000;
threshold_up = 85;

case HIGH_POWER:
/* Upclock if more than 85% busy over 10ms */
ei_up = 10000;
threshold_up = 85;
/* Downclock if less than 60% busy over 32ms */
ei_down = 32000;
threshold_down = 60;
break;
}

/* Downclock if less than 60% busy over 32ms */
ei_down = 32000;
threshold_down = 60;
break;
}
} else {
/* add separated rps threshold policy in GVT-g host */
switch (new_power) {
case LOW_POWER:
/* Upclock if more than 80% busy over 16ms */
ei_up = 16000;
threshold_up = 80;

/* Downclock if less than 60% busy over 32ms */
ei_down = 32000;
threshold_down = 60;
break;

case BETWEEN:
/* Upclock if more than 75% busy over 13ms */
ei_up = 13000;
threshold_up = 75;

/* Downclock if less than 55% busy over 32ms */
ei_down = 32000;
threshold_down = 55;
break;

case HIGH_POWER:
/* Upclock if more than 70% busy over 10ms */
ei_up = 10000;
threshold_up = 70;

/* Downclock if less than 40% busy over 32ms */
ei_down = 32000;
threshold_down = 40;
break;
}

}
I915_WRITE(GEN6_RP_UP_EI,
GT_INTERVAL_FROM_US(dev_priv, ei_up));
I915_WRITE(GEN6_RP_UP_THRESHOLD,
Expand Down
5 changes: 5 additions & 0 deletions drivers/gpu/drm/i915/vgt/aperture_gm.c
Original file line number Diff line number Diff line change
Expand Up @@ -150,6 +150,11 @@ void rsvd_aperture_free(struct pgt_device *pdev, unsigned long start, unsigned l
}
}

/*
 * rsvd_aperture_runout_handler - react to the reserved aperture running out.
 *
 * Only sets pdev->dummy_vm_switch; NOTE(review): presumably a scheduler or
 * switch path polls this flag and performs a switch to the dummy VM to
 * release reserved-aperture pages — confirm against the consumer of
 * dummy_vm_switch.
 */
void rsvd_aperture_runout_handler(struct pgt_device *pdev)
{
	pdev->dummy_vm_switch = true;
}

ssize_t get_avl_vm_aperture_gm_and_fence(struct pgt_device *pdev, char *buf,
ssize_t buf_sz)
{
Expand Down
37 changes: 28 additions & 9 deletions drivers/gpu/drm/i915/vgt/cmd_parser.c
Original file line number Diff line number Diff line change
Expand Up @@ -128,6 +128,8 @@ static inline int add_patch_entry(struct parser_exec_state *s,
struct cmd_patch_info *patch;
int next;

ASSERT(s->shadow != INDIRECT_CTX_SHADOW);

if (addr == NULL) {
vgt_err("VM(%d) CMD_SCAN: NULL address to be patched\n",
s->vgt->vgt_id);
Expand Down Expand Up @@ -160,6 +162,8 @@ static inline int add_post_handle_entry(struct parser_exec_state *s,
struct cmd_handler_info *entry;
int next;

ASSERT(s->shadow != INDIRECT_CTX_SHADOW);

next = get_next_entry(list);
if (next == list->count) {
vgt_err("CMD_SCAN: no free post-handle entry\n");
Expand Down Expand Up @@ -191,6 +195,8 @@ static int add_tail_entry(struct parser_exec_state *s,
struct cmd_tail_info *entry;
int next;

ASSERT(s->shadow != INDIRECT_CTX_SHADOW);

next = get_next_entry(list);
if (next == list->count) {
vgt_err("CMD_SCAN: no free tail entry\n");
Expand Down Expand Up @@ -994,10 +1000,20 @@ static inline unsigned long vgt_get_gma_from_bb_start(
if (g_gm_is_valid(vgt, ip_gma)) {
bb_start_gma = 0;
va = vgt_gma_to_va(vgt->gtt.ggtt_mm, ip_gma);
if (va == NULL) {
vgt_err("VM-%d(ring %d>: Failed to get va of guest gma 0x%lx!\n",
vgt->vm_id, ring_id, ip_gma);
return 0;
}
hypervisor_read_va(vgt, va, &cmd, 4, 1);
opcode = vgt_get_opcode(cmd, ring_id);
ASSERT(opcode == OP_MI_BATCH_BUFFER_START);
va = vgt_gma_to_va(vgt->gtt.ggtt_mm, ip_gma + 4);
if (va == NULL) {
vgt_err("VM-%d(ring %d>: Failed to get va of guest gma 0x%lx!\n",
vgt->vm_id, ring_id, ip_gma + 4);
return 0;
}
hypervisor_read_va(vgt, va, &bb_start_gma, 4, 1);
} else if (g_gm_is_reserved(vgt, ip_gma)) {
va = v_aperture(vgt->pdev, ip_gma);
Expand Down Expand Up @@ -1357,8 +1373,7 @@ static int vgt_handle_mi_display_flip(struct parser_exec_state *s)
}

{
if (!vgt_flip_parameter_check(s, plane_code, stride_val, surf_val))
goto wrong_command;
vgt_flip_parameter_check(s, plane_code, stride_val, surf_val);

GET_INFO_FOR_FLIP(pipe, plane,
ctrl_reg, surf_reg, stride_reg, stride_mask);
Expand Down Expand Up @@ -1418,7 +1433,7 @@ static int vgt_handle_mi_display_flip(struct parser_exec_state *s)
((value & ~plane_select_mask) |
(real_plane_code << plane_select_shift)));

vgt_inject_flip_done(s->vgt, pipe);
vgt_inject_flip_done(s->vgt, pipe, plane);

return 0;
}
Expand All @@ -1432,7 +1447,7 @@ static int vgt_handle_mi_display_flip(struct parser_exec_state *s)

rc |= add_patch_entry(s, cmd_ptr(s, 0), MI_NOOP);

vgt_inject_flip_done(s->vgt, pipe);
vgt_inject_flip_done(s->vgt, pipe, plane);

return rc;

Expand Down Expand Up @@ -1771,6 +1786,7 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
return 1;
}

#define LITE_RESTORE_FLOOD_THRESHOLD 1000
static int vgt_perform_bb_shadow(struct parser_exec_state *s)
{
struct vgt_device *vgt = s->vgt;
Expand Down Expand Up @@ -1835,7 +1851,9 @@ static int vgt_perform_bb_shadow(struct parser_exec_state *s)
s_cmd_page->guest_gma = bb_guest_gma;
s_cmd_page->bound_gma = shadow_gma;

s->el_ctx->shadow_priv_bb.n_pages ++;
if (s->el_ctx->shadow_priv_bb.n_pages++ > LITE_RESTORE_FLOOD_THRESHOLD)
rsvd_aperture_runout_handler(vgt->pdev);

list_add_tail(&s_cmd_page->list,
&s->el_ctx->shadow_priv_bb.pages);

Expand Down Expand Up @@ -2783,7 +2801,7 @@ static inline bool gma_out_of_range(unsigned long gma, unsigned long gma_head, u
#define MAX_PARSER_ERROR_NUM 10

static int __vgt_scan_vring(struct vgt_device *vgt, int ring_id, vgt_reg_t head,
vgt_reg_t tail, vgt_reg_t base, vgt_reg_t size, bool shadow)
vgt_reg_t tail, vgt_reg_t base, vgt_reg_t size, cmd_shadow_t shadow)
{
unsigned long gma_head, gma_tail, gma_bottom;
struct parser_exec_state s;
Expand Down Expand Up @@ -2869,7 +2887,7 @@ static int __vgt_scan_vring(struct vgt_device *vgt, int ring_id, vgt_reg_t head,
}
}

if (!rc) {
if (!rc && shadow != INDIRECT_CTX_SHADOW) {
/*
* Set flag to indicate the command buffer is end with user interrupt,
* and save the instruction's offset in ring buffer.
Expand Down Expand Up @@ -3061,7 +3079,8 @@ int vgt_scan_vring(struct vgt_device *vgt, int ring_id)
if (ret == 0) {
ret = __vgt_scan_vring(vgt, ring_id, rs->last_scan_head,
vring->tail & RB_TAIL_OFF_MASK, rb_base,
_RING_CTL_BUF_SIZE(vring->ctl), shadow_cmd_buffer);
_RING_CTL_BUF_SIZE(vring->ctl),
shadow_cmd_buffer ? NORMAL_CMD_SHADOW : NO_CMD_SHADOW);

rs->last_scan_head = vring->tail;
}
Expand All @@ -3084,7 +3103,7 @@ int vgt_scan_vring(struct vgt_device *vgt, int ring_id)
if (ret)
goto err;
if (!__vgt_scan_vring(vgt, ring_id, 0, ctx_tail,
ctx_base, dummy_ctx_size, true)) {
ctx_base, dummy_ctx_size, INDIRECT_CTX_SHADOW)) {
vgt_get_bb_per_ctx_shadow_base(vgt, rs->el_ctx);
} else {
ret = -1;
Expand Down
8 changes: 7 additions & 1 deletion drivers/gpu/drm/i915/vgt/cmd_parser.h
Original file line number Diff line number Diff line change
Expand Up @@ -439,6 +439,12 @@ typedef enum{
PPGTT_BUFFER
}gtt_addr_t;

/*
 * Shadowing mode used by the command scanner for a given buffer.
 * Passed to __vgt_scan_vring(); INDIRECT_CTX_SHADOW additionally suppresses
 * patch/post-handle/tail bookkeeping in the parser (see the ASSERTs in
 * add_patch_entry()/add_post_handle_entry()/add_tail_entry()).
 */
typedef enum {
	NO_CMD_SHADOW = 0,       /* scan guest buffer in place, no shadow copy */
	NORMAL_CMD_SHADOW = 1,   /* shadow a normal ring/batch command buffer */
	INDIRECT_CTX_SHADOW = 2, /* shadow an indirect (per-)context buffer */
} cmd_shadow_t;

struct parser_exec_state {
struct vgt_device *vgt;
int ring_id;
Expand Down Expand Up @@ -487,7 +493,7 @@ struct parser_exec_state {
void *ip_buf;

struct execlist_context *el_ctx;
bool shadow;
cmd_shadow_t shadow;
};

#define CMD_TAIL_NUM 1024
Expand Down
40 changes: 39 additions & 1 deletion drivers/gpu/drm/i915/vgt/debugfs.c
Original file line number Diff line number Diff line change
Expand Up @@ -685,7 +685,7 @@ static void vgt_dump_dpy_mmio(struct seq_file *m, struct pgt_device *pdev,
seq_printf(m, "\t\teDP select: %s\n", str);
}
seq_printf(m, "\n");

if (is_current_display_owner(vgt)) {
return;
}
Expand Down Expand Up @@ -1397,6 +1397,12 @@ int vgt_create_debugfs(struct vgt_device *vgt)
debugfs_create_u64_node ("gpt_find_miss_cnt", 0444, perf_dir_entry, &(vgt->stat.gpt_find_miss_cnt));
debugfs_create_u64_node ("gpt_find_miss_cycles", 0444, perf_dir_entry, &(vgt->stat.gpt_find_miss_cycles));
debugfs_create_u64_node ("skip_bb_cnt", 0444, perf_dir_entry, &(vgt->stat.skip_bb_cnt));
debugfs_create_u64_node ("shadow_last_level_page_cnt", 0444, perf_dir_entry, &(vgt->stat.shadow_last_level_page_cnt));
debugfs_create_u64_node ("shadow_last_level_page_cycles", 0444, perf_dir_entry, &(vgt->stat.shadow_last_level_page_cycles));
debugfs_create_u64_node ("oos_page_cnt", 0444, perf_dir_entry, &(vgt->stat.oos_page_cnt));
debugfs_create_u64_node ("oos_page_cycles", 0444, perf_dir_entry, &(vgt->stat.oos_page_cycles));
debugfs_create_u64_node ("oos_pte_cnt", 0444, perf_dir_entry, &(vgt->stat.oos_pte_cnt));
debugfs_create_u64_node ("oos_pte_cycles", 0444, perf_dir_entry, &(vgt->stat.oos_pte_cycles));

/* cmd statistics for ring/batch buffers */
cmdstat_dir_entry = debugfs_create_dir("ring", perf_dir_entry);
Expand All @@ -1411,6 +1417,38 @@ int vgt_create_debugfs(struct vgt_device *vgt)
return 0;
}

/*
 * Create a debugfs symlink named after runtime_module_param inside
 * runtime_dir_entry, pointing at the i915 module parameter
 * /sys/module/i915/parameters/<target_module_param>.  Both name arguments
 * are stringized at compile time.
 */
#define VGT_CREATE_SYMLINK_FOR_MODULE_PARAM(runtime_module_param, runtime_dir_entry, target_module_param) \
	debugfs_create_symlink(#runtime_module_param, runtime_dir_entry, "/sys/module/i915/parameters/" #target_module_param)

void vgt_debugfs_symlink_module_param(void)
{
struct dentry *runtime_dir_entry = debugfs_create_dir("runtime", d_vgt_debug);
if (!runtime_dir_entry) {
vgt_err("Failed to create debugfs directory: runtime\n");
return ;
}
VGT_CREATE_SYMLINK_FOR_MODULE_PARAM(vgt_debug, runtime_dir_entry, debug);
VGT_CREATE_SYMLINK_FOR_MODULE_PARAM(tbs_period_ms, runtime_dir_entry, tbs_period_ms);
VGT_CREATE_SYMLINK_FOR_MODULE_PARAM(render_engine_reset, runtime_dir_entry, render_engine_reset);
VGT_CREATE_SYMLINK_FOR_MODULE_PARAM(propagate_monitor_to_guest, runtime_dir_entry, propagate_monitor_to_guest);
VGT_CREATE_SYMLINK_FOR_MODULE_PARAM(preallocated_shadow_pages, runtime_dir_entry, preallocated_shadow_pages);
VGT_CREATE_SYMLINK_FOR_MODULE_PARAM(preallocated_oos_pages, runtime_dir_entry, preallocated_oos_pages);
VGT_CREATE_SYMLINK_FOR_MODULE_PARAM(spt_out_of_sync, runtime_dir_entry, spt_out_of_sync);
VGT_CREATE_SYMLINK_FOR_MODULE_PARAM(enable_video_switch, runtime_dir_entry, enable_video_switch);
VGT_CREATE_SYMLINK_FOR_MODULE_PARAM(bypass_scan_mask, runtime_dir_entry, bypass_scan);
VGT_CREATE_SYMLINK_FOR_MODULE_PARAM(bypass_dom0_addr_check, runtime_dir_entry, bypass_dom0_addr_check);
VGT_CREATE_SYMLINK_FOR_MODULE_PARAM(enable_panel_fitting, runtime_dir_entry, enable_panel_fitting);
VGT_CREATE_SYMLINK_FOR_MODULE_PARAM(enable_reset, runtime_dir_entry, enable_reset);
VGT_CREATE_SYMLINK_FOR_MODULE_PARAM(preemption_policy, runtime_dir_entry, preemption_policy);
VGT_CREATE_SYMLINK_FOR_MODULE_PARAM(reset_count_threshold, runtime_dir_entry, reset_count_threshold);
VGT_CREATE_SYMLINK_FOR_MODULE_PARAM(reset_dur_threshold, runtime_dir_entry, reset_dur_threshold);
VGT_CREATE_SYMLINK_FOR_MODULE_PARAM(reset_max_threshold, runtime_dir_entry, reset_max_threshold);
VGT_CREATE_SYMLINK_FOR_MODULE_PARAM(shadow_ctx_check, runtime_dir_entry, shadow_ctx_check);
VGT_CREATE_SYMLINK_FOR_MODULE_PARAM(shadow_indirect_ctx_bb, runtime_dir_entry, shadow_indirect_ctx_bb);
VGT_CREATE_SYMLINK_FOR_MODULE_PARAM(vgt_cmd_audit, runtime_dir_entry, vgt_cmd_audit);
VGT_CREATE_SYMLINK_FOR_MODULE_PARAM(vgt_hold_forcewake, runtime_dir_entry, vgt_hold_forcewake);
}

/* debugfs_remove_recursive has no return value, so this function
 * also returns nothing */
void vgt_destroy_debugfs(struct vgt_device *vgt)
Expand Down
Loading

0 comments on commit 83df1d4

Please sign in to comment.