Merge tag 'v5.10.57' into 5.10.y
This is the 5.10.57 stable release
sileshn committed Aug 8, 2021
2 parents 48e3eea + 1cd6e30 commit 24106e7
Showing 44 changed files with 332 additions and 336 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 56
SUBLEVEL = 57
EXTRAVERSION =
NAME = Dare mighty things

3 changes: 3 additions & 0 deletions drivers/firmware/arm_scmi/bus.c
@@ -113,6 +113,9 @@ int scmi_driver_register(struct scmi_driver *driver, struct module *owner,
{
int retval;

if (!driver->probe)
return -EINVAL;

driver->driver.bus = &scmi_bus_type;
driver->driver.name = driver->name;
driver->driver.owner = owner;
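For context, the new check makes scmi_driver_register() fail fast when a driver supplies no probe callback, rather than accepting a driver the bus can never successfully bind. A minimal sketch of a registration that now returns -EINVAL; the driver name and layout are hypothetical, not from this patch:

#include <linux/scmi_protocol.h>

/* Hypothetical SCMI driver with no .probe: after this change,
 * scmi_driver_register() rejects it with -EINVAL up front instead
 * of deferring the failure to device-probe time. */
static struct scmi_driver example_driver = {
	.name = "example",
	/* .probe deliberately left NULL */
};

Registering it with scmi_driver_register(&example_driver, THIS_MODULE, KBUILD_MODNAME) now fails immediately.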
8 changes: 6 additions & 2 deletions drivers/firmware/arm_scmi/driver.c
@@ -436,8 +436,12 @@ int scmi_do_xfer_with_response(const struct scmi_handle *handle,
xfer->async_done = &async_response;

ret = scmi_do_xfer(handle, xfer);
if (!ret && !wait_for_completion_timeout(xfer->async_done, timeout))
ret = -ETIMEDOUT;
if (!ret) {
if (!wait_for_completion_timeout(xfer->async_done, timeout))
ret = -ETIMEDOUT;
else if (xfer->hdr.status)
ret = scmi_to_linux_errno(xfer->hdr.status);
}

xfer->async_done = NULL;
return ret;
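Since wait_for_completion_timeout() returns 0 on timeout and the remaining jiffies otherwise, the expanded block distinguishes three outcomes: a transport error from scmi_do_xfer(), a delayed response that never arrived, and a delayed response whose header carries a firmware error status, which is now converted through scmi_to_linux_errno() instead of being discarded. A hedged sketch of what a protocol caller sees after this change; the function below is illustrative, not part of the patch:

#include "common.h"	/* driver-internal: struct scmi_xfer etc. */

/* Illustrative only: 'handle' and 'xfer' are assumed to have been
 * prepared through the driver's usual xfer-init path. */
static int example_async_command(const struct scmi_handle *handle,
				 struct scmi_xfer *xfer)
{
	int ret = scmi_do_xfer_with_response(handle, xfer);

	if (ret == -ETIMEDOUT)	/* no delayed response arrived in time */
		return ret;
	if (ret)		/* transport error, or (new here) the
				 * firmware status mapped to an errno */
		return ret;

	/* xfer->rx holds a response firmware marked as successful */
	return 0;
}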
5 changes: 4 additions & 1 deletion drivers/firmware/efi/mokvar-table.c
@@ -180,7 +180,10 @@ void __init efi_mokvar_table_init(void)
pr_err("EFI MOKvar config table is not valid\n");
return;
}
efi_mem_reserve(efi.mokvar_table, map_size_needed);

if (md.type == EFI_BOOT_SERVICES_DATA)
efi_mem_reserve(efi.mokvar_table, map_size_needed);

efi_mokvar_table_size = map_size_needed;
}

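Hedged context for the added condition: efi_mem_reserve() exists to preserve boot-services memory, which firmware hands back to the OS after ExitBootServices(), so a MOK variable table in EFI_BOOT_SERVICES_DATA must be explicitly reserved to stay valid; a table placed in runtime-services or otherwise reserved memory is retained regardless, and reserving it anyway reportedly triggered failed EFI memory-descriptor lookups.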
6 changes: 4 additions & 2 deletions drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2077,8 +2077,10 @@ int dcn20_populate_dml_pipes_from_context(
- timing->v_border_bottom;
pipes[pipe_cnt].pipe.dest.htotal = timing->h_total;
pipes[pipe_cnt].pipe.dest.vtotal = v_total;
pipes[pipe_cnt].pipe.dest.hactive = timing->h_addressable;
pipes[pipe_cnt].pipe.dest.vactive = timing->v_addressable;
pipes[pipe_cnt].pipe.dest.hactive =
timing->h_addressable + timing->h_border_left + timing->h_border_right;
pipes[pipe_cnt].pipe.dest.vactive =
timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE;
pipes[pipe_cnt].pipe.dest.pixel_rate_mhz = timing->pix_clk_100hz/10000.0;
if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
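A quick worked example with hypothetical numbers: for a 1920x1080 timing carrying 8-pixel borders on every edge, DML now sees hactive = 1920 + 8 + 8 = 1936 and vactive = 1080 + 8 + 8 = 1096, so the prefetch and watermark calculations cover the full scanned-out active region rather than only the addressable pixels.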
2 changes: 1 addition & 1 deletion drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -4888,7 +4888,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
} while ((locals->PrefetchSupported[i][j] != true || locals->VRatioInPrefetchSupported[i][j] != true)
&& (mode_lib->vba.NextMaxVStartup != mode_lib->vba.MaxMaxVStartup[0][0]
|| mode_lib->vba.NextPrefetchMode < mode_lib->vba.MaxPrefetchMode));
|| mode_lib->vba.NextPrefetchMode <= mode_lib->vba.MaxPrefetchMode));

if (locals->PrefetchSupported[i][j] == true && locals->VRatioInPrefetchSupported[i][j] == true) {
mode_lib->vba.BandwidthAvailableForImmediateFlip = locals->ReturnBWPerState[i][0];
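The change from < to <= reads as an off-by-one fix: the loop advances NextPrefetchMode as it retries, so with the strict comparison the do/while gave up once NextPrefetchMode reached MaxPrefetchMode, before the final prefetch mode had actually been evaluated; the inclusive comparison lets that last mode be tried before the configuration is reported as unsupported.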
164 changes: 9 additions & 155 deletions drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
Expand Up @@ -24,7 +24,6 @@
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_ioctls.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"

@@ -1401,6 +1400,10 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
int err;
struct intel_engine_cs *engine = eb->engine;

/* If we need to copy for the cmdparser, we will stall anyway */
if (eb_use_cmdparser(eb))
return ERR_PTR(-EWOULDBLOCK);

if (!reloc_can_use_engine(engine)) {
engine = engine->gt->engine_class[COPY_ENGINE_CLASS][0];
if (!engine)
@@ -2267,152 +2270,6 @@ shadow_batch_pin(struct i915_execbuffer *eb,
return vma;
}

struct eb_parse_work {
struct dma_fence_work base;
struct intel_engine_cs *engine;
struct i915_vma *batch;
struct i915_vma *shadow;
struct i915_vma *trampoline;
unsigned long batch_offset;
unsigned long batch_length;
};

static int __eb_parse(struct dma_fence_work *work)
{
struct eb_parse_work *pw = container_of(work, typeof(*pw), base);

return intel_engine_cmd_parser(pw->engine,
pw->batch,
pw->batch_offset,
pw->batch_length,
pw->shadow,
pw->trampoline);
}

static void __eb_parse_release(struct dma_fence_work *work)
{
struct eb_parse_work *pw = container_of(work, typeof(*pw), base);

if (pw->trampoline)
i915_active_release(&pw->trampoline->active);
i915_active_release(&pw->shadow->active);
i915_active_release(&pw->batch->active);
}

static const struct dma_fence_work_ops eb_parse_ops = {
.name = "eb_parse",
.work = __eb_parse,
.release = __eb_parse_release,
};

static inline int
__parser_mark_active(struct i915_vma *vma,
struct intel_timeline *tl,
struct dma_fence *fence)
{
struct intel_gt_buffer_pool_node *node = vma->private;

return i915_active_ref(&node->active, tl->fence_context, fence);
}

static int
parser_mark_active(struct eb_parse_work *pw, struct intel_timeline *tl)
{
int err;

mutex_lock(&tl->mutex);

err = __parser_mark_active(pw->shadow, tl, &pw->base.dma);
if (err)
goto unlock;

if (pw->trampoline) {
err = __parser_mark_active(pw->trampoline, tl, &pw->base.dma);
if (err)
goto unlock;
}

unlock:
mutex_unlock(&tl->mutex);
return err;
}

static int eb_parse_pipeline(struct i915_execbuffer *eb,
struct i915_vma *shadow,
struct i915_vma *trampoline)
{
struct eb_parse_work *pw;
int err;

GEM_BUG_ON(overflows_type(eb->batch_start_offset, pw->batch_offset));
GEM_BUG_ON(overflows_type(eb->batch_len, pw->batch_length));

pw = kzalloc(sizeof(*pw), GFP_KERNEL);
if (!pw)
return -ENOMEM;

err = i915_active_acquire(&eb->batch->vma->active);
if (err)
goto err_free;

err = i915_active_acquire(&shadow->active);
if (err)
goto err_batch;

if (trampoline) {
err = i915_active_acquire(&trampoline->active);
if (err)
goto err_shadow;
}

dma_fence_work_init(&pw->base, &eb_parse_ops);

pw->engine = eb->engine;
pw->batch = eb->batch->vma;
pw->batch_offset = eb->batch_start_offset;
pw->batch_length = eb->batch_len;
pw->shadow = shadow;
pw->trampoline = trampoline;

/* Mark active refs early for this worker, in case we get interrupted */
err = parser_mark_active(pw, eb->context->timeline);
if (err)
goto err_commit;

err = dma_resv_reserve_shared(pw->batch->resv, 1);
if (err)
goto err_commit;

/* Wait for all writes (and relocs) into the batch to complete */
err = i915_sw_fence_await_reservation(&pw->base.chain,
pw->batch->resv, NULL, false,
0, I915_FENCE_GFP);
if (err < 0)
goto err_commit;

/* Keep the batch alive and unwritten as we parse */
dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma);

/* Force execution to wait for completion of the parser */
dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);

dma_fence_work_commit_imm(&pw->base);
return 0;

err_commit:
i915_sw_fence_set_error_once(&pw->base.chain, err);
dma_fence_work_commit_imm(&pw->base);
return err;

err_shadow:
i915_active_release(&shadow->active);
err_batch:
i915_active_release(&eb->batch->vma->active);
err_free:
kfree(pw);
return err;
}

static struct i915_vma *eb_dispatch_secure(struct i915_execbuffer *eb, struct i915_vma *vma)
{
/*
@@ -2494,13 +2351,11 @@ static int eb_parse(struct i915_execbuffer *eb)
eb->batch_flags |= I915_DISPATCH_SECURE;
}

batch = eb_dispatch_secure(eb, shadow);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto err_trampoline;
}

err = eb_parse_pipeline(eb, shadow, trampoline);
err = intel_engine_cmd_parser(eb->engine,
eb->batch->vma,
eb->batch_start_offset,
eb->batch_len,
shadow, trampoline);
if (err)
goto err_unpin_batch;

@@ -2522,7 +2377,6 @@ static int eb_parse(struct i915_execbuffer *eb)
err_unpin_batch:
if (batch)
i915_vma_unpin(batch);
err_trampoline:
if (trampoline)
i915_vma_unpin(trampoline);
err_shadow:
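Taken together, the execbuffer hunks revert the asynchronous command parser: the dma_fence_work-based eb_parse_work machinery, which pipelined parsing and signalled completion through fences on the batch and shadow objects, is removed, and eb_parse() now invokes intel_engine_cmd_parser() synchronously, so the shadow batch is fully validated before the request can be submitted. The early return added to reloc_gpu() follows the same logic, per its own comment: if the cmdparser must copy the batch, execution stalls anyway, so building a GPU relocation batch buys nothing and the -EWOULDBLOCK error steers relocation handling onto the CPU path.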
28 changes: 16 additions & 12 deletions drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1143,27 +1143,30 @@ find_reg(const struct intel_engine_cs *engine, u32 addr)
/* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
struct drm_i915_gem_object *src_obj,
unsigned long offset, unsigned long length)
u32 offset, u32 length)
{
bool needs_clflush;
unsigned int src_needs_clflush;
unsigned int dst_needs_clflush;
void *dst, *src;
int ret;

ret = i915_gem_object_prepare_write(dst_obj, &dst_needs_clflush);
if (ret)
return ERR_PTR(ret);

dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB);
i915_gem_object_finish_access(dst_obj);
if (IS_ERR(dst))
return dst;

ret = i915_gem_object_pin_pages(src_obj);
ret = i915_gem_object_prepare_read(src_obj, &src_needs_clflush);
if (ret) {
i915_gem_object_unpin_map(dst_obj);
return ERR_PTR(ret);
}

needs_clflush =
!(src_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ);

src = ERR_PTR(-ENODEV);
if (needs_clflush && i915_has_memcpy_from_wc()) {
if (src_needs_clflush && i915_has_memcpy_from_wc()) {
src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
if (!IS_ERR(src)) {
i915_unaligned_memcpy_from_wc(dst,
@@ -1185,7 +1188,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
* validate up to the end of the batch.
*/
remain = length;
if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
if (dst_needs_clflush & CLFLUSH_BEFORE)
remain = round_up(remain,
boot_cpu_data.x86_clflush_size);

@@ -1195,7 +1198,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
int len = min(remain, PAGE_SIZE - x);

src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
if (needs_clflush)
if (src_needs_clflush)
drm_clflush_virt_range(src + x, len);
memcpy(ptr, src + x, len);
kunmap_atomic(src);
@@ -1206,11 +1209,10 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
}
}

i915_gem_object_unpin_pages(src_obj);
i915_gem_object_finish_access(src_obj);

memset32(dst + length, 0, (dst_obj->base.size - length) / sizeof(u32));

/* dst_obj is returned with vmap pinned */
return dst;
}

@@ -1417,6 +1419,7 @@ static unsigned long *alloc_whitelist(u32 batch_length)
* Return: non-zero if the parser finds violations or otherwise fails; -EACCES
* if the batch appears legal but should use hardware parsing
*/

int intel_engine_cmd_parser(struct intel_engine_cs *engine,
struct i915_vma *batch,
unsigned long batch_offset,
@@ -1437,7 +1440,8 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
batch->size));
GEM_BUG_ON(!batch_length);

cmd = copy_batch(shadow->obj, batch->obj, batch_offset, batch_length);
cmd = copy_batch(shadow->obj, batch->obj,
batch_offset, batch_length);
if (IS_ERR(cmd)) {
DRM_DEBUG("CMD: Failed to copy batch\n");
return PTR_ERR(cmd);
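The copy_batch() rework swaps the hand-rolled cache_coherent test and raw page pinning for the object's prepare/finish access helpers, which report the required cache maintenance through the CLFLUSH_BEFORE/CLFLUSH_AFTER flags. A hedged sketch of that pattern; the flags and calls are existing i915 API of this era, while the wrapper function itself is hypothetical:

#include <drm/drm_cache.h>		/* drm_clflush_virt_range() */
#include "gem/i915_gem_object.h"	/* prepare/finish helpers, CLFLUSH_* */

/* Illustrative only: read an object's pages with the coherency
 * bookkeeping copy_batch() now relies on. 'vaddr' is assumed to be
 * an existing CPU mapping of obj (e.g. from i915_gem_object_pin_map()). */
static int example_read_object(struct drm_i915_gem_object *obj,
			       void *vaddr, unsigned long len)
{
	unsigned int needs_clflush;
	int err;

	err = i915_gem_object_prepare_read(obj, &needs_clflush);
	if (err)
		return err;

	if (needs_clflush & CLFLUSH_BEFORE)	/* CPU view may be stale */
		drm_clflush_virt_range(vaddr, len);

	/* ... read the pages here ... */

	i915_gem_object_finish_access(obj);	/* drops the page pin */
	return 0;
}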
8 changes: 2 additions & 6 deletions drivers/gpu/drm/i915/i915_request.c
@@ -1285,10 +1285,8 @@ i915_request_await_execution(struct i915_request *rq,

do {
fence = *child++;
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
i915_sw_fence_set_error_once(&rq->submit, fence->error);
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
continue;
}

if (fence->context == rq->fence.context)
continue;
@@ -1386,10 +1384,8 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)

do {
fence = *child++;
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
i915_sw_fence_set_error_once(&rq->submit, fence->error);
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
continue;
}

/*
* Requests on the same timeline are explicitly ordered, along
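Both i915_request.c hunks revert the propagation of errors from already-signalled fences; such fences are once again skipped outright when a request collects its dependencies. Reportedly the propagation let a hang (and its fence error) in one client leak into unrelated clients' requests, and with the command parser made synchronous above, fence errors are no longer needed to report parse failures.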