|
6 | 6 |
|
7 | 7 | #include <linux/prime_numbers.h> |
8 | 8 |
|
| 9 | +#include "intel_context.h" |
| 10 | +#include "intel_engine_heartbeat.h" |
9 | 11 | #include "intel_engine_pm.h" |
10 | 12 | #include "intel_gt.h" |
11 | 13 | #include "intel_gt_requests.h" |
@@ -750,6 +752,189 @@ static int live_hwsp_wrap(void *arg) |
750 | 752 | return err; |
751 | 753 | } |
752 | 754 |
|
/*
 * Stop the engine's background heartbeat so the selftest has exclusive
 * control over the timeline.  The previous interval is stashed in @saved
 * so engine_heartbeat_enable() can restore it afterwards.
 *
 * NOTE(review): ordering matters — the interval must be zeroed before
 * parking, and a pm wakeref is taken so the engine stays awake (and the
 * heartbeat stays parked) for the duration of the test.
 */
static void engine_heartbeat_disable(struct intel_engine_cs *engine,
				     unsigned long *saved)
{
	/* Remember the configured interval for later restoration. */
	*saved = engine->props.heartbeat_interval_ms;
	/* An interval of 0 prevents the heartbeat from being rearmed. */
	engine->props.heartbeat_interval_ms = 0;

	intel_engine_pm_get(engine);
	intel_engine_park_heartbeat(engine);
}
| 764 | + |
/*
 * Undo engine_heartbeat_disable(): release the pm wakeref taken there and
 * restore the heartbeat interval that was saved.  The heartbeat will be
 * rearmed naturally once the engine is next in use.
 */
static void engine_heartbeat_enable(struct intel_engine_cs *engine,
				    unsigned long saved)
{
	intel_engine_pm_put(engine);

	engine->props.heartbeat_interval_ms = saved;
}
| 772 | + |
| 773 | +static int live_hwsp_rollover_kernel(void *arg) |
| 774 | +{ |
| 775 | + struct intel_gt *gt = arg; |
| 776 | + struct intel_engine_cs *engine; |
| 777 | + enum intel_engine_id id; |
| 778 | + int err = 0; |
| 779 | + |
| 780 | + /* |
| 781 | + * Run the host for long enough, and even the kernel context will |
| 782 | + * see a seqno rollover. |
| 783 | + */ |
| 784 | + |
| 785 | + for_each_engine(engine, gt, id) { |
| 786 | + struct intel_context *ce = engine->kernel_context; |
| 787 | + struct intel_timeline *tl = ce->timeline; |
| 788 | + struct i915_request *rq[3] = {}; |
| 789 | + unsigned long heartbeat; |
| 790 | + int i; |
| 791 | + |
| 792 | + engine_heartbeat_disable(engine, &heartbeat); |
| 793 | + if (intel_gt_wait_for_idle(gt, HZ / 2)) { |
| 794 | + err = -EIO; |
| 795 | + goto out; |
| 796 | + } |
| 797 | + |
| 798 | + GEM_BUG_ON(i915_active_fence_isset(&tl->last_request)); |
| 799 | + tl->seqno = 0; |
| 800 | + timeline_rollback(tl); |
| 801 | + timeline_rollback(tl); |
| 802 | + WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno); |
| 803 | + |
| 804 | + for (i = 0; i < ARRAY_SIZE(rq); i++) { |
| 805 | + struct i915_request *this; |
| 806 | + |
| 807 | + this = i915_request_create(ce); |
| 808 | + if (IS_ERR(this)) { |
| 809 | + err = PTR_ERR(this); |
| 810 | + goto out; |
| 811 | + } |
| 812 | + |
| 813 | + pr_debug("%s: create fence.seqnp:%d\n", |
| 814 | + engine->name, |
| 815 | + lower_32_bits(this->fence.seqno)); |
| 816 | + |
| 817 | + GEM_BUG_ON(rcu_access_pointer(this->timeline) != tl); |
| 818 | + |
| 819 | + rq[i] = i915_request_get(this); |
| 820 | + i915_request_add(this); |
| 821 | + } |
| 822 | + |
| 823 | + /* We expected a wrap! */ |
| 824 | + GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno); |
| 825 | + |
| 826 | + if (i915_request_wait(rq[2], 0, HZ / 5) < 0) { |
| 827 | + pr_err("Wait for timeline wrap timed out!\n"); |
| 828 | + err = -EIO; |
| 829 | + goto out; |
| 830 | + } |
| 831 | + |
| 832 | + for (i = 0; i < ARRAY_SIZE(rq); i++) { |
| 833 | + if (!i915_request_completed(rq[i])) { |
| 834 | + pr_err("Pre-wrap request not completed!\n"); |
| 835 | + err = -EINVAL; |
| 836 | + goto out; |
| 837 | + } |
| 838 | + } |
| 839 | + |
| 840 | +out: |
| 841 | + for (i = 0; i < ARRAY_SIZE(rq); i++) |
| 842 | + i915_request_put(rq[i]); |
| 843 | + engine_heartbeat_enable(engine, heartbeat); |
| 844 | + if (err) |
| 845 | + break; |
| 846 | + } |
| 847 | + |
| 848 | + if (igt_flush_test(gt->i915)) |
| 849 | + err = -EIO; |
| 850 | + |
| 851 | + return err; |
| 852 | +} |
| 853 | + |
| 854 | +static int live_hwsp_rollover_user(void *arg) |
| 855 | +{ |
| 856 | + struct intel_gt *gt = arg; |
| 857 | + struct intel_engine_cs *engine; |
| 858 | + enum intel_engine_id id; |
| 859 | + int err = 0; |
| 860 | + |
| 861 | + /* |
| 862 | + * Simulate a long running user context, and force the seqno wrap |
| 863 | + * on the user's timeline. |
| 864 | + */ |
| 865 | + |
| 866 | + for_each_engine(engine, gt, id) { |
| 867 | + struct i915_request *rq[3] = {}; |
| 868 | + struct intel_timeline *tl; |
| 869 | + struct intel_context *ce; |
| 870 | + int i; |
| 871 | + |
| 872 | + ce = intel_context_create(engine); |
| 873 | + if (IS_ERR(ce)) |
| 874 | + return PTR_ERR(ce); |
| 875 | + |
| 876 | + err = intel_context_alloc_state(ce); |
| 877 | + if (err) |
| 878 | + goto out; |
| 879 | + |
| 880 | + tl = ce->timeline; |
| 881 | + if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline) |
| 882 | + goto out; |
| 883 | + |
| 884 | + timeline_rollback(tl); |
| 885 | + timeline_rollback(tl); |
| 886 | + WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno); |
| 887 | + |
| 888 | + for (i = 0; i < ARRAY_SIZE(rq); i++) { |
| 889 | + struct i915_request *this; |
| 890 | + |
| 891 | + this = intel_context_create_request(ce); |
| 892 | + if (IS_ERR(this)) { |
| 893 | + err = PTR_ERR(this); |
| 894 | + goto out; |
| 895 | + } |
| 896 | + |
| 897 | + pr_debug("%s: create fence.seqnp:%d\n", |
| 898 | + engine->name, |
| 899 | + lower_32_bits(this->fence.seqno)); |
| 900 | + |
| 901 | + GEM_BUG_ON(rcu_access_pointer(this->timeline) != tl); |
| 902 | + |
| 903 | + rq[i] = i915_request_get(this); |
| 904 | + i915_request_add(this); |
| 905 | + } |
| 906 | + |
| 907 | + /* We expected a wrap! */ |
| 908 | + GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno); |
| 909 | + |
| 910 | + if (i915_request_wait(rq[2], 0, HZ / 5) < 0) { |
| 911 | + pr_err("Wait for timeline wrap timed out!\n"); |
| 912 | + err = -EIO; |
| 913 | + goto out; |
| 914 | + } |
| 915 | + |
| 916 | + for (i = 0; i < ARRAY_SIZE(rq); i++) { |
| 917 | + if (!i915_request_completed(rq[i])) { |
| 918 | + pr_err("Pre-wrap request not completed!\n"); |
| 919 | + err = -EINVAL; |
| 920 | + goto out; |
| 921 | + } |
| 922 | + } |
| 923 | + |
| 924 | +out: |
| 925 | + for (i = 0; i < ARRAY_SIZE(rq); i++) |
| 926 | + i915_request_put(rq[i]); |
| 927 | + intel_context_put(ce); |
| 928 | + if (err) |
| 929 | + break; |
| 930 | + } |
| 931 | + |
| 932 | + if (igt_flush_test(gt->i915)) |
| 933 | + err = -EIO; |
| 934 | + |
| 935 | + return err; |
| 936 | +} |
| 937 | + |
753 | 938 | static int live_hwsp_recycle(void *arg) |
754 | 939 | { |
755 | 940 | struct intel_gt *gt = arg; |
@@ -827,6 +1012,8 @@ int intel_timeline_live_selftests(struct drm_i915_private *i915) |
827 | 1012 | SUBTEST(live_hwsp_engine), |
828 | 1013 | SUBTEST(live_hwsp_alternate), |
829 | 1014 | SUBTEST(live_hwsp_wrap), |
| 1015 | + SUBTEST(live_hwsp_rollover_kernel), |
| 1016 | + SUBTEST(live_hwsp_rollover_user), |
830 | 1017 | }; |
831 | 1018 |
|
832 | 1019 | if (intel_gt_is_wedged(&i915->gt)) |
|
0 commit comments