Merge remote-tracking branch 'stable/linux-6.1.y' into rpi-6.1.y
popcornmix committed Jul 10, 2023
2 parents 1d15e6a + 61fd484 commit 31dbf25
Showing 10 changed files with 130 additions and 45 deletions.
7 changes: 7 additions & 0 deletions Documentation/process/changes.rst
@@ -60,6 +60,7 @@ openssl & libcrypto 1.0.0 openssl version
bc 1.06.95 bc --version
Sphinx\ [#f1]_ 1.7 sphinx-build --version
cpio any cpio --version
gtags (optional) 6.6.5 gtags --version
====================== =============== ========================================

.. [#f1] Sphinx is needed only to build the Kernel documentation
@@ -174,6 +175,12 @@ You will need openssl to build kernels 3.7 and higher if module signing is
enabled. You will also need openssl development packages to build kernels 4.3
and higher.

gtags / GNU GLOBAL (optional)
-----------------------------

The kernel build requires GNU GLOBAL version 6.6.5 or later to generate
tag files through ``make gtags``. This is due to its use of the gtags
``-C (--directory)`` flag.

System utilities
****************
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
-SUBLEVEL = 37
+SUBLEVEL = 38
EXTRAVERSION =
NAME = Curry Ramen

4 changes: 4 additions & 0 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2363,6 +2363,10 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
long timeout = msecs_to_jiffies(2000);
int r;

/* No valid flags defined yet */
if (args->in.flags)
return -EINVAL;

switch (args->in.op) {
case AMDGPU_VM_OP_RESERVE_VMID:
/* We only have requirement to reserve vmid from gfxhub */
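The new check rejects any bits set in args->in.flags because no flags are defined for this ioctl yet; keeping the field strictly reserved means a future flag cannot be silently ignored by older kernels. A minimal userspace-style sketch of the same validate-reserved-flags pattern (the struct, handler and op values below are illustrative, not the AMDGPU UAPI):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative request struct: only "op" is meaningful today, "flags" is reserved. */
struct vm_ioctl_args {
    uint32_t op;
    uint32_t flags;
};

/* Reject reserved bits first, so they can safely gain meaning later. */
static int handle_vm_ioctl(const struct vm_ioctl_args *args)
{
    if (args->flags)            /* no valid flags defined yet */
        return -EINVAL;

    switch (args->op) {
    case 1:                     /* e.g. "reserve" */
    case 2:                     /* e.g. "unreserve" */
        return 0;
    default:
        return -EINVAL;
    }
}

int main(void)
{
    struct vm_ioctl_args ok = { .op = 1, .flags = 0 };
    struct vm_ioctl_args bad = { .op = 1, .flags = 0x4 };

    printf("flags=0: %d\n", handle_vm_ioctl(&ok));   /* 0 */
    printf("flags=4: %d\n", handle_vm_ioctl(&bad));  /* -EINVAL (-22) */
    return 0;
}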
50 changes: 29 additions & 21 deletions drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -401,8 +401,13 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
{
int i;

-   if (memcmp(adjust, &stream->adjust, sizeof(struct dc_crtc_timing_adjust)) == 0)
-       return true;
+   /*
+    * Don't adjust DRR while there's bandwidth optimizations pending to
+    * avoid conflicting with firmware updates.
+    */
+   if (dc->ctx->dce_version > DCE_VERSION_MAX)
+       if (dc->optimized_required || dc->wm_optimized_required)
+           return false;

stream->adjust.v_total_max = adjust->v_total_max;
stream->adjust.v_total_mid = adjust->v_total_mid;
@@ -2024,27 +2029,33 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)

post_surface_trace(dc);

-   if (dc->ctx->dce_version >= DCE_VERSION_MAX)
-       TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
-   else
+   /*
+    * Only relevant for DCN behavior where we can guarantee the optimization
+    * is safe to apply - retain the legacy behavior for DCE.
+    */
+
+   if (dc->ctx->dce_version < DCE_VERSION_MAX)
        TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
+   else {
+       TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);

-   if (is_flip_pending_in_pipes(dc, context))
-       return;
+       if (is_flip_pending_in_pipes(dc, context))
+           return;

-   for (i = 0; i < dc->res_pool->pipe_count; i++)
-       if (context->res_ctx.pipe_ctx[i].stream == NULL ||
-           context->res_ctx.pipe_ctx[i].plane_state == NULL) {
-           context->res_ctx.pipe_ctx[i].pipe_idx = i;
-           dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
-       }
+       for (i = 0; i < dc->res_pool->pipe_count; i++)
+           if (context->res_ctx.pipe_ctx[i].stream == NULL ||
+               context->res_ctx.pipe_ctx[i].plane_state == NULL) {
+               context->res_ctx.pipe_ctx[i].pipe_idx = i;
+               dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
+           }

-   process_deferred_updates(dc);
+       process_deferred_updates(dc);

-   dc->hwss.optimize_bandwidth(dc, context);
+       dc->hwss.optimize_bandwidth(dc, context);

-   if (dc->debug.enable_double_buffered_dsc_pg_support)
-       dc->hwss.update_dsc_pg(dc, context, true);
+       if (dc->debug.enable_double_buffered_dsc_pg_support)
+           dc->hwss.update_dsc_pg(dc, context, true);
+   }

dc->optimized_required = false;
dc->wm_optimized_required = false;
@@ -3869,12 +3880,9 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
new_pipe->plane_state->force_full_update = true;
}
-   } else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) {
+   } else if (update_type == UPDATE_TYPE_FAST) {
        /*
         * Previous frame finished and HW is ready for optimization.
-        *
-        * Only relevant for DCN behavior where we can guarantee the optimization
-        * is safe to apply - retain the legacy behavior for DCE.
         */
dc_post_update_surfaces_to_stream(dc);
}
22 changes: 17 additions & 5 deletions drivers/nubus/proc.c
@@ -137,6 +137,18 @@ static int nubus_proc_rsrc_show(struct seq_file *m, void *v)
return 0;
}

static int nubus_rsrc_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, nubus_proc_rsrc_show, inode);
}

static const struct proc_ops nubus_rsrc_proc_ops = {
.proc_open = nubus_rsrc_proc_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_release = single_release,
};

void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
const struct nubus_dirent *ent,
unsigned int size)
@@ -152,8 +164,8 @@ void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
pded = nubus_proc_alloc_pde_data(nubus_dirptr(ent), size);
else
pded = NULL;
-   proc_create_single_data(name, S_IFREG | 0444, procdir,
-                           nubus_proc_rsrc_show, pded);
+   proc_create_data(name, S_IFREG | 0444, procdir,
+                    &nubus_rsrc_proc_ops, pded);
}

void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
@@ -166,9 +178,9 @@ void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
return;

snprintf(name, sizeof(name), "%x", ent->type);
-   proc_create_single_data(name, S_IFREG | 0444, procdir,
-                           nubus_proc_rsrc_show,
-                           nubus_proc_alloc_pde_data(data, 0));
+   proc_create_data(name, S_IFREG | 0444, procdir,
+                    &nubus_rsrc_proc_ops,
+                    nubus_proc_alloc_pde_data(data, 0));
}

/*
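The nubus change swaps proc_create_single_data() for an explicit struct proc_ops whose open callback wraps the existing show routine via single_open(), the usual seq_file pattern when a driver supplies its own proc_ops. A minimal sketch of one common form of that pattern as a standalone module; the proc file name, show routine and "hello" payload are made up for illustration and are not part of the nubus driver:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
    /* the data handed to proc_create_data() ends up in m->private via single_open() */
    seq_printf(m, "value: %s\n", (const char *)m->private);
    return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
    /* pde_data() recovers the pointer passed as the last proc_create_data() argument */
    return single_open(file, example_show, pde_data(inode));
}

static const struct proc_ops example_proc_ops = {
    .proc_open    = example_open,
    .proc_read    = seq_read,
    .proc_lseek   = seq_lseek,
    .proc_release = single_release,
};

static int __init example_init(void)
{
    proc_create_data("nubus_example", 0444, NULL, &example_proc_ops, "hello");
    return 0;
}

static void __exit example_exit(void)
{
    remove_proc_entry("nubus_example", NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");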
53 changes: 40 additions & 13 deletions drivers/pci/pci-acpi.c
@@ -1043,6 +1043,16 @@ bool acpi_pci_bridge_d3(struct pci_dev *dev)
return false;
}

static void acpi_pci_config_space_access(struct pci_dev *dev, bool enable)
{
int val = enable ? ACPI_REG_CONNECT : ACPI_REG_DISCONNECT;
int ret = acpi_evaluate_reg(ACPI_HANDLE(&dev->dev),
ACPI_ADR_SPACE_PCI_CONFIG, val);
if (ret)
pci_dbg(dev, "ACPI _REG %s evaluation failed (%d)\n",
enable ? "connect" : "disconnect", ret);
}

int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
@@ -1053,32 +1063,49 @@ int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
[PCI_D3hot] = ACPI_STATE_D3_HOT,
[PCI_D3cold] = ACPI_STATE_D3_COLD,
};
-   int error = -EINVAL;
+   int error;

/* If the ACPI device has _EJ0, ignore the device */
if (!adev || acpi_has_method(adev->handle, "_EJ0"))
return -ENODEV;

switch (state) {
-   case PCI_D3cold:
-       if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
-               PM_QOS_FLAGS_ALL) {
-           error = -EBUSY;
-           break;
-       }
-       fallthrough;
    case PCI_D0:
    case PCI_D1:
    case PCI_D2:
    case PCI_D3hot:
-       error = acpi_device_set_power(adev, state_conv[state]);
+   case PCI_D3cold:
+       break;
+   default:
+       return -EINVAL;
    }

+   if (state == PCI_D3cold) {
+       if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
+               PM_QOS_FLAGS_ALL)
+           return -EBUSY;
+
+       /* Notify AML lack of PCI config space availability */
+       acpi_pci_config_space_access(dev, false);
+   }

-   if (!error)
-       pci_dbg(dev, "power state changed by ACPI to %s\n",
-           acpi_power_state_string(adev->power.state));
+   error = acpi_device_set_power(adev, state_conv[state]);
+   if (error)
+       return error;

-   return error;
+   pci_dbg(dev, "power state changed by ACPI to %s\n",
+       acpi_power_state_string(adev->power.state));
+
+   /*
+    * Notify AML of PCI config space availability. Config space is
+    * accessible in all states except D3cold; the only transitions
+    * that change availability are transitions to D3cold and from
+    * D3cold to D0.
+    */
+   if (state == PCI_D0)
+       acpi_pci_config_space_access(dev, true);
+
+   return 0;
}

pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
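The added helper and its two call sites implement the rule spelled out in the comment: PCI config space is reachable in every power state except D3cold, so AML gets a _REG disconnect just before the device enters D3cold and a connect once it is back in D0. A small self-contained model of that availability rule, only meant to make the transition table concrete (the enum and program are illustrative stand-ins, not kernel code):

#include <stdbool.h>
#include <stdio.h>

enum pci_power { D0, D1, D2, D3HOT, D3COLD };

static const char * const names[] = { "D0", "D1", "D2", "D3hot", "D3cold" };

/* Config space is accessible in every state except D3cold. */
static bool config_space_accessible(enum pci_power s)
{
    return s != D3COLD;
}

int main(void)
{
    /* Typical round trip: runtime suspend to D3cold, then resume to D0. */
    enum pci_power path[] = { D0, D3COLD, D0 };
    int i;

    for (i = 1; i < 3; i++) {
        bool before = config_space_accessible(path[i - 1]);
        bool after = config_space_accessible(path[i]);

        if (before != after)
            printf("%s -> %s: evaluate _REG (%s)\n",
                   names[path[i - 1]], names[path[i]],
                   after ? "connect" : "disconnect");
        else
            printf("%s -> %s: no _REG needed\n",
                   names[path[i - 1]], names[path[i]]);
    }
    return 0;
}

Run as is, it reports a disconnect for the D0 -> D3cold leg and a connect for the D3cold -> D0 leg, matching the two acpi_pci_config_space_access() call sites above.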
4 changes: 3 additions & 1 deletion include/linux/mm.h
@@ -378,7 +378,7 @@ extern unsigned int kobjsize(const void *objp);
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */

/* Bits set in the VMA until the stack is in its final location */
-#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ)
+#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)

#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)

@@ -400,8 +400,10 @@ extern unsigned int kobjsize(const void *objp);

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK VM_GROWSUP
#define VM_STACK_EARLY VM_GROWSDOWN
#else
#define VM_STACK VM_GROWSDOWN
#define VM_STACK_EARLY 0
#endif

#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
7 changes: 6 additions & 1 deletion mm/nommu.c
@@ -688,8 +688,13 @@ EXPORT_SYMBOL(find_vma);
struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
unsigned long addr, struct pt_regs *regs)
{
struct vm_area_struct *vma;

mmap_read_lock(mm);
-   return vma_lookup(mm, addr);
+   vma = vma_lookup(mm, addr);
+   if (!vma)
+       mmap_read_unlock(mm);
+   return vma;
}

/*
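With this fix the nommu build follows the same locking contract as the MMU implementation of lock_mm_and_find_vma(): on success it returns the VMA with the mmap read lock still held, and on failure it drops the lock itself before returning NULL. A hedged sketch of a typical caller, buildable only in-tree; the wrapper function below is illustrative and is not part of this commit:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mmap_lock.h>
#include <linux/ptrace.h>

/*
 * Illustrative caller only: lock_mm_and_find_vma() takes the mmap read
 * lock and looks up the VMA covering @addr.  When it fails it has already
 * dropped the lock, so the caller must not unlock again.
 */
static int example_resolve_addr(struct mm_struct *mm, unsigned long addr,
                                struct pt_regs *regs)
{
    struct vm_area_struct *vma;

    vma = lock_mm_and_find_vma(mm, addr, regs);
    if (!vma)
        return -EFAULT;        /* lock was released for us */

    /* ... inspect the mapping, handle the fault, etc. ... */

    mmap_read_unlock(mm);      /* success path: we still hold the lock */
    return 0;
}

The important point is the asymmetry: the caller unlocks on the success path but must not unlock again after a NULL return.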
9 changes: 8 additions & 1 deletion scripts/tags.sh
@@ -25,6 +25,13 @@ else
tree=${srctree}/
fi

# gtags(1) refuses to index any file outside of its current working dir.
# If gtags indexing is requested and the build output directory is not
# the kernel source tree, index all files in absolute-path form.
if [[ "$1" == "gtags" && -n "${tree}" ]]; then
tree=$(realpath "$tree")/
fi

# Detect if ALLSOURCE_ARCHS is set. If not, we assume SRCARCH
if [ "${ALLSOURCE_ARCHS}" = "" ]; then
ALLSOURCE_ARCHS=${SRCARCH}
@@ -124,7 +131,7 @@ docscope()

dogtags()
{
-   all_target_sources | gtags -i -f -
+   all_target_sources | gtags -i -C "${tree:-.}" -f - "$PWD"
}

# Basic regular expressions with an optional /kind-spec/ for ctags and
17 changes: 15 additions & 2 deletions tools/perf/util/symbol.c
@@ -1368,10 +1368,23 @@ static int dso__load_kcore(struct dso *dso, struct map *map,

/* Find the kernel map using the '_stext' symbol */
if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) {
u64 replacement_size = 0;

list_for_each_entry(new_map, &md.maps, node) {
-           if (stext >= new_map->start && stext < new_map->end) {
+           u64 new_size = new_map->end - new_map->start;
+
+           if (!(stext >= new_map->start && stext < new_map->end))
+               continue;
+
+           /*
+            * On some architectures, ARM64 for example, the kernel
+            * text can get allocated inside of the vmalloc segment.
+            * Select the smallest matching segment, in case stext
+            * falls within more than one in the list.
+            */
+           if (!replacement_map || new_size < replacement_size) {
                replacement_map = new_map;
-               break;
+               replacement_size = new_size;
            }
}
}
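The reworked loop no longer stops at the first kcore map that contains _stext. On ARM64 the kernel text can land inside the much larger vmalloc segment, so several maps may match, and the smallest matching one is the real kernel text mapping. The selection rule in isolation, as a standalone sketch with made-up addresses rather than perf internals:

#include <stdint.h>
#include <stdio.h>

struct seg {
    uint64_t start, end;    /* [start, end) */
};

/* Pick the smallest segment containing addr, mirroring the dso__load_kcore() change. */
static const struct seg *smallest_match(const struct seg *segs, int n, uint64_t addr)
{
    const struct seg *best = NULL;
    int i;

    for (i = 0; i < n; i++) {
        uint64_t size = segs[i].end - segs[i].start;

        if (!(addr >= segs[i].start && addr < segs[i].end))
            continue;
        if (!best || size < best->end - best->start)
            best = &segs[i];
    }
    return best;
}

int main(void)
{
    /* Made-up layout: a huge vmalloc-like segment and the small text segment both cover _stext. */
    struct seg segs[] = {
        { 0xffff800008000000ULL, 0xffff800010000000ULL },  /* vmalloc-like, large */
        { 0xffff800009000000ULL, 0xffff800009800000ULL },  /* kernel text, small  */
    };
    uint64_t stext = 0xffff800009010000ULL;
    const struct seg *m = smallest_match(segs, 2, stext);

    printf("chose segment starting at 0x%llx\n",
           (unsigned long long)(m ? m->start : 0));
    return 0;
}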
