Merge tag 'v5.10.64' into 5.10

This is the 5.10.64 stable release

xanmod committed Sep 12, 2021
2 parents 34efa8c + cb83afd commit 5bd79be
Showing 28 changed files with 251 additions and 173 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 63
SUBLEVEL = 64
EXTRAVERSION =
NAME = Dare mighty things

47 changes: 26 additions & 21 deletions arch/x86/events/amd/iommu.c
@@ -18,8 +18,6 @@
#include "../perf_event.h"
#include "iommu.h"

#define COUNTER_SHIFT 16

/* iommu pmu conf masks */
#define GET_CSOURCE(x) ((x)->conf & 0xFFULL)
#define GET_DEVID(x) (((x)->conf >> 8) & 0xFFFFULL)
@@ -285,22 +283,31 @@ static void perf_iommu_start(struct perf_event *event, int flags)
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
hwc->state = 0;

/*
* To account for power-gating, which prevents write to
* the counter, we need to enable the counter
* before setting up counter register.
*/
perf_iommu_enable_event(event);

if (flags & PERF_EF_RELOAD) {
u64 prev_raw_count = local64_read(&hwc->prev_count);
u64 count = 0;
struct amd_iommu *iommu = perf_event_2_iommu(event);

/*
* Since the IOMMU PMU only support counting mode,
* the counter always start with value zero.
*/
amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
IOMMU_PC_COUNTER_REG, &prev_raw_count);
IOMMU_PC_COUNTER_REG, &count);
}

perf_iommu_enable_event(event);
perf_event_update_userpage(event);

}

static void perf_iommu_read(struct perf_event *event)
{
u64 count, prev, delta;
u64 count;
struct hw_perf_event *hwc = &event->hw;
struct amd_iommu *iommu = perf_event_2_iommu(event);

@@ -311,14 +318,11 @@ static void perf_iommu_read(struct perf_event *event)
/* IOMMU pc counter register is only 48 bits */
count &= GENMASK_ULL(47, 0);

prev = local64_read(&hwc->prev_count);
if (local64_cmpxchg(&hwc->prev_count, prev, count) != prev)
return;

/* Handle 48-bit counter overflow */
delta = (count << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
delta >>= COUNTER_SHIFT;
local64_add(delta, &event->count);
/*
* Since the counter always start with value zero,
* simply just accumulate the count for the event.
*/
local64_add(count, &event->count);
}

static void perf_iommu_stop(struct perf_event *event, int flags)
@@ -328,15 +332,16 @@ static void perf_iommu_stop(struct perf_event *event, int flags)
if (hwc->state & PERF_HES_UPTODATE)
return;

/*
* To account for power-gating, in which reading the counter would
* return zero, we need to read the register before disabling.
*/
perf_iommu_read(event);
hwc->state |= PERF_HES_UPTODATE;

perf_iommu_disable_event(event);
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
hwc->state |= PERF_HES_STOPPED;

if (hwc->state & PERF_HES_UPTODATE)
return;

perf_iommu_read(event);
hwc->state |= PERF_HES_UPTODATE;
}

static int perf_iommu_add(struct perf_event *event, int flags)
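The iommu.c hunks above change the accounting model: the counter is enabled before it is programmed (a power-gated counter ignores register writes), it is always reprogrammed to zero, and perf_iommu_read() then accumulates the raw 48-bit value directly instead of tracking a previous snapshot. Below is a minimal userspace C sketch contrasting the two accumulation strategies; names and types are illustrative, not the kernel code.

/*
 * Userspace sketch only: contrasts the old delta-based accounting with
 * the accumulate-from-zero model used by the patched perf_iommu_read().
 */
#include <stdint.h>
#include <stdio.h>

#define CNT_MASK ((1ULL << 48) - 1)	/* IOMMU PC counters are 48 bits wide */

struct ev_sketch {
	uint64_t prev;
	uint64_t total;
};

/* Old model: remember the last raw value and add the (wrapping) difference. */
static void read_delta(struct ev_sketch *ev, uint64_t raw)
{
	uint64_t delta = (raw - ev->prev) & CNT_MASK;

	ev->prev = raw;
	ev->total += delta;
}

/* New model: the counter is rewritten to zero on every start, so each
 * read simply adds the raw value to the event total. */
static void read_accumulate(struct ev_sketch *ev, uint64_t raw)
{
	ev->total += raw & CNT_MASK;
}

int main(void)
{
	struct ev_sketch a = { 0 }, b = { 0 };

	read_delta(&a, 100);		/* 100 events since the last read */
	read_accumulate(&b, 100);	/* counter started at zero: add 100 */
	printf("%llu %llu\n",
	       (unsigned long long)a.total, (unsigned long long)b.total);
	return 0;
}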
3 changes: 2 additions & 1 deletion arch/x86/kernel/reboot.c
@@ -388,10 +388,11 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = {
},
{ /* Handle problems with rebooting on the OptiPlex 990. */
.callback = set_pci_reboot,
.ident = "Dell OptiPlex 990",
.ident = "Dell OptiPlex 990 BIOS A0x",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
DMI_MATCH(DMI_BIOS_VERSION, "A0"),
},
},
{ /* Handle problems with rebooting on Dell 300's */
1 change: 0 additions & 1 deletion block/blk-core.c
@@ -121,7 +121,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
rq->internal_tag = BLK_MQ_NO_TAG;
rq->start_time_ns = ktime_get_ns();
rq->part = NULL;
refcount_set(&rq->ref, 1);
blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);
13 changes: 13 additions & 0 deletions block/blk-flush.c
@@ -263,6 +263,11 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

bool is_flush_rq(struct request *rq)
{
return rq->end_io == flush_end_io;
}

/**
* blk_kick_flush - consider issuing flush request
* @q: request_queue being kicked
@@ -330,6 +335,14 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
flush_rq->rq_flags |= RQF_FLUSH_SEQ;
flush_rq->rq_disk = first_rq->rq_disk;
flush_rq->end_io = flush_end_io;
/*
* Order WRITE ->end_io and WRITE rq->ref, and its pair is the one
* implied in refcount_inc_not_zero() called from
* blk_mq_find_and_get_req(), which orders WRITE/READ flush_rq->ref
* and READ flush_rq->end_io
*/
smp_wmb();
refcount_set(&flush_rq->ref, 1);

blk_flush_queue_rq(flush_rq, false);
}
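The new comment and smp_wmb() in blk_kick_flush() spell out a publish-then-reference rule: flush_rq->end_io must be visible before flush_rq->ref becomes non-zero, so a concurrent blk_mq_find_and_get_req() that succeeds in refcount_inc_not_zero() is guaranteed to also observe the callback. The userspace C11 sketch below illustrates that pairing with standard atomics as stand-ins for the kernel primitives; the names are hypothetical.

/*
 * Userspace C11 sketch of the publish-then-reference pattern: a release
 * fence stands in for smp_wmb(), an acquire compare-and-swap loop stands
 * in for refcount_inc_not_zero().
 */
#include <stdatomic.h>
#include <stddef.h>

struct rq_sketch {
	void (*end_io)(struct rq_sketch *rq, int error);
	atomic_uint ref;
};

static void flush_end_io_sketch(struct rq_sketch *rq, int error)
{
	(void)rq;
	(void)error;
}

/* Writer side (blk_kick_flush() analogue): publish ->end_io, then let
 * the reference count become non-zero. */
static void publish_flush_rq(struct rq_sketch *rq)
{
	rq->end_io = flush_end_io_sketch;
	atomic_thread_fence(memory_order_release);	/* smp_wmb() stand-in */
	atomic_store_explicit(&rq->ref, 1, memory_order_relaxed);
}

/* Reader side (blk_mq_find_and_get_req() analogue): only take a
 * reference if the count is already non-zero; success implies the
 * ->end_io store above is visible. */
static int try_get_flush_rq(struct rq_sketch *rq)
{
	unsigned int old = atomic_load_explicit(&rq->ref, memory_order_relaxed);

	while (old != 0) {
		if (atomic_compare_exchange_weak_explicit(&rq->ref, &old,
							  old + 1,
							  memory_order_acquire,
							  memory_order_relaxed))
			return 1;
	}
	return 0;
}

int main(void)
{
	struct rq_sketch rq;

	rq.end_io = NULL;
	atomic_init(&rq.ref, 0);
	publish_flush_rq(&rq);
	return try_get_flush_rq(&rq) ? 0 : 1;
}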
37 changes: 35 additions & 2 deletions block/blk-mq.c
@@ -929,7 +929,7 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)

void blk_mq_put_rq_ref(struct request *rq)
{
if (is_flush_rq(rq, rq->mq_hctx))
if (is_flush_rq(rq))
rq->end_io(rq, 0);
else if (refcount_dec_and_test(&rq->ref))
__blk_mq_free_request(rq);
@@ -2589,16 +2589,49 @@ static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
&hctx->cpuhp_dead);
}

/*
* Before freeing hw queue, clearing the flush request reference in
* tags->rqs[] for avoiding potential UAF.
*/
static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
unsigned int queue_depth, struct request *flush_rq)
{
int i;
unsigned long flags;

/* The hw queue may not be mapped yet */
if (!tags)
return;

WARN_ON_ONCE(refcount_read(&flush_rq->ref) != 0);

for (i = 0; i < queue_depth; i++)
cmpxchg(&tags->rqs[i], flush_rq, NULL);

/*
* Wait until all pending iteration is done.
*
* Request reference is cleared and it is guaranteed to be observed
* after the ->lock is released.
*/
spin_lock_irqsave(&tags->lock, flags);
spin_unlock_irqrestore(&tags->lock, flags);
}

/* hctx->ctxs will be freed in queue's release handler */
static void blk_mq_exit_hctx(struct request_queue *q,
struct blk_mq_tag_set *set,
struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
struct request *flush_rq = hctx->fq->flush_rq;

if (blk_mq_hw_queue_mapped(hctx))
blk_mq_tag_idle(hctx);

blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
set->queue_depth, flush_rq);
if (set->ops->exit_request)
set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
set->ops->exit_request(set, flush_rq, hctx_idx);

if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
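blk_mq_clear_flush_rq_mapping() combines two steps: cmpxchg() clears every tags->rqs[] slot that still points at the flush request, and an empty acquire/release of tags->lock then acts as a drain so any tag iteration that began before the clear has completed before the request is freed. A pthread-based userspace sketch of that idiom follows; the types and names are hypothetical, not the kernel implementation.

/*
 * Userspace sketch of the "clear, then pass through the lock" drain
 * idiom: iterators walk rqs[] under the lock, so locking and unlocking
 * after the clear guarantees no iterator still holds a stale pointer.
 */
#include <pthread.h>
#include <stddef.h>

#define DEPTH 64

struct tags_sketch {
	pthread_mutex_t lock;
	void *rqs[DEPTH];
};

/* Iterator side: the whole walk happens under tags->lock. */
static void iterate(struct tags_sketch *tags, void (*fn)(void *rq))
{
	pthread_mutex_lock(&tags->lock);
	for (int i = 0; i < DEPTH; i++)
		if (tags->rqs[i])
			fn(tags->rqs[i]);
	pthread_mutex_unlock(&tags->lock);
}

/* Teardown side: drop every slot that still references flush_rq (the
 * compare-and-swap stands in for cmpxchg()), then take and release the
 * lock so any iteration started earlier has finished. */
static void clear_flush_rq_mapping(struct tags_sketch *tags, void *flush_rq)
{
	for (int i = 0; i < DEPTH; i++)
		__sync_bool_compare_and_swap(&tags->rqs[i], flush_rq, NULL);

	pthread_mutex_lock(&tags->lock);
	pthread_mutex_unlock(&tags->lock);
}

static void touch(void *rq)
{
	(void)rq;
}

int main(void)
{
	static struct tags_sketch tags = { .lock = PTHREAD_MUTEX_INITIALIZER };
	int flush_rq;				/* stand-in request object */

	tags.rqs[3] = &flush_rq;
	iterate(&tags, touch);
	clear_flush_rq_mapping(&tags, &flush_rq);
	return tags.rqs[3] != NULL;		/* 0: the stale slot was cleared */
}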
6 changes: 1 addition & 5 deletions block/blk.h
@@ -44,11 +44,7 @@ static inline void __blk_get_queue(struct request_queue *q)
kobject_get(&q->kobj);
}

static inline bool
is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
{
return hctx->fq->flush_rq == req;
}
bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
gfp_t flags);
1 change: 1 addition & 0 deletions drivers/net/ethernet/realtek/r8169_main.c
@@ -3547,6 +3547,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);

rtl_pcie_state_l2l3_disable(tp);
rtl_hw_aspm_clkreq_enable(tp, true);
}

DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond)
4 changes: 1 addition & 3 deletions drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -942,10 +942,8 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
wmb();
lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */

if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
netdev_info(ndev, "%s -> netif_stop_queue\n", __func__);
if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
netif_stop_queue(ndev);
}

return NETDEV_TX_OK;
}
12 changes: 6 additions & 6 deletions drivers/pci/quirks.c
@@ -3246,12 +3246,12 @@ static void fixup_mpss_256(struct pci_dev *dev)
{
dev->pcie_mpss = 1; /* 256 bytes */
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);

/*
* Intel 5000 and 5100 Memory controllers have an erratum with read completion
26 changes: 13 additions & 13 deletions drivers/tty/serial/8250/8250_omap.c
@@ -538,6 +538,11 @@ static void omap_8250_pm(struct uart_port *port, unsigned int state,
static void omap_serial_fill_features_erratas(struct uart_8250_port *up,
struct omap8250_priv *priv)
{
const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "AM65X", },
{ .family = "J721E", .revision = "SR1.0" },
{ /* sentinel */ }
};
u32 mvr, scheme;
u16 revision, major, minor;

@@ -585,6 +590,14 @@ static void omap_serial_fill_features_erratas(struct uart_8250_port *up,
default:
break;
}

/*
* AM65x SR1.0, AM65x SR2.0 and J721e SR1.0 don't
* don't have RHR_IT_DIS bit in IER2 register. So drop to flag
* to enable errata workaround.
*/
if (soc_device_match(k3_soc_devices))
priv->habit &= ~UART_HAS_RHR_IT_DIS;
}

static void omap8250_uart_qos_work(struct work_struct *work)
@@ -1208,12 +1221,6 @@ static int omap8250_no_handle_irq(struct uart_port *port)
return 0;
}

static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "AM65X", },
{ .family = "J721E", .revision = "SR1.0" },
{ /* sentinel */ }
};

static struct omap8250_dma_params am654_dma = {
.rx_size = SZ_2K,
.rx_trigger = 1,
@@ -1419,13 +1426,6 @@ static int omap8250_probe(struct platform_device *pdev)
up.dma->rxconf.src_maxburst = RX_TRIGGER;
up.dma->txconf.dst_maxburst = TX_TRIGGER;
}

/*
* AM65x SR1.0, AM65x SR2.0 and J721e SR1.0 don't
* don't have RHR_IT_DIS bit in IER2 register
*/
if (soc_device_match(k3_soc_devices))
priv->habit &= ~UART_HAS_RHR_IT_DIS;
}
#endif
ret = serial8250_register_8250_port(&up);
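The 8250_omap hunks move the k3_soc_devices table into omap_serial_fill_features_erratas() so the UART_HAS_RHR_IT_DIS workaround is applied where the other erratas are detected. The table follows the usual sentinel-terminated soc_device_match() shape; the userspace sketch below mimics that matching (a NULL field acts as a wildcard) with hypothetical types, not the kernel API.

/*
 * Userspace sketch of the sentinel-terminated match-table idiom behind
 * soc_device_match(): walk entries until the empty sentinel, and treat
 * a NULL field as "match anything".
 */
#include <stdio.h>
#include <string.h>

struct soc_attr_sketch {
	const char *family;
	const char *revision;
};

static int attr_matches(const struct soc_attr_sketch *want,
			const struct soc_attr_sketch *soc)
{
	if (want->family && strcmp(want->family, soc->family))
		return 0;
	if (want->revision && strcmp(want->revision, soc->revision))
		return 0;
	return 1;
}

static const struct soc_attr_sketch *
soc_match_sketch(const struct soc_attr_sketch *table,
		 const struct soc_attr_sketch *soc)
{
	for (; table->family || table->revision; table++)	/* stop at sentinel */
		if (attr_matches(table, soc))
			return table;
	return NULL;
}

int main(void)
{
	static const struct soc_attr_sketch k3_quirky[] = {
		{ .family = "AM65X" },				/* any revision */
		{ .family = "J721E", .revision = "SR1.0" },
		{ /* sentinel */ }
	};
	struct soc_attr_sketch running = { "J721E", "SR1.0" };

	if (soc_match_sketch(k3_quirky, &running))
		printf("apply RHR_IT_DIS errata workaround\n");
	return 0;
}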
4 changes: 2 additions & 2 deletions drivers/usb/gadget/udc/tegra-xudc.c
@@ -1610,7 +1610,7 @@ static void tegra_xudc_ep_context_setup(struct tegra_xudc_ep *ep)
u16 maxpacket, maxburst = 0, esit = 0;
u32 val;

maxpacket = usb_endpoint_maxp(desc) & 0x7ff;
maxpacket = usb_endpoint_maxp(desc);
if (xudc->gadget.speed == USB_SPEED_SUPER) {
if (!usb_endpoint_xfer_control(desc))
maxburst = comp_desc->bMaxBurst;
@@ -1621,7 +1621,7 @@ static void tegra_xudc_ep_context_setup(struct tegra_xudc_ep *ep)
(usb_endpoint_xfer_int(desc) ||
usb_endpoint_xfer_isoc(desc))) {
if (xudc->gadget.speed == USB_SPEED_HIGH) {
maxburst = (usb_endpoint_maxp(desc) >> 11) & 0x3;
maxburst = usb_endpoint_maxp_mult(desc) - 1;
if (maxburst == 0x3) {
dev_warn(xudc->dev,
"invalid endpoint maxburst\n");
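The tegra-xudc hunks drop the open-coded masking of wMaxPacketSize in favour of usb_endpoint_maxp() and usb_endpoint_maxp_mult(): bits 10:0 carry the packet size and bits 12:11 the additional transactions per microframe, so the driver's maxburst becomes mult - 1. A small userspace sketch of that decoding follows; these are local helpers for illustration, not the kernel ones.

/*
 * Userspace sketch of how wMaxPacketSize packs the two values the
 * tegra-xudc hunks now obtain via usb_endpoint_maxp() and
 * usb_endpoint_maxp_mult().
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int maxp_sketch(uint16_t wMaxPacketSize)
{
	return wMaxPacketSize & 0x7ff;			/* bits 10:0 */
}

static unsigned int maxp_mult_sketch(uint16_t wMaxPacketSize)
{
	return ((wMaxPacketSize >> 11) & 0x3) + 1;	/* 1..3 transactions */
}

int main(void)
{
	uint16_t w = 0x1400;	/* high-bandwidth isoc: 1024 bytes, 3 per uframe */

	printf("maxpacket=%u mult=%u maxburst=%u\n",
	       maxp_sketch(w), maxp_mult_sketch(w), maxp_mult_sketch(w) - 1);
	return 0;
}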
14 changes: 10 additions & 4 deletions drivers/usb/host/xhci-debugfs.c
@@ -198,12 +198,13 @@ static void xhci_ring_dump_segment(struct seq_file *s,
int i;
dma_addr_t dma;
union xhci_trb *trb;
char str[XHCI_MSG_MAX];

for (i = 0; i < TRBS_PER_SEGMENT; i++) {
trb = &seg->trbs[i];
dma = seg->dma + i * sizeof(*trb);
seq_printf(s, "%pad: %s\n", &dma,
xhci_decode_trb(le32_to_cpu(trb->generic.field[0]),
xhci_decode_trb(str, XHCI_MSG_MAX, le32_to_cpu(trb->generic.field[0]),
le32_to_cpu(trb->generic.field[1]),
le32_to_cpu(trb->generic.field[2]),
le32_to_cpu(trb->generic.field[3])));
@@ -260,11 +261,13 @@ static int xhci_slot_context_show(struct seq_file *s, void *unused)
struct xhci_slot_ctx *slot_ctx;
struct xhci_slot_priv *priv = s->private;
struct xhci_virt_device *dev = priv->dev;
char str[XHCI_MSG_MAX];

xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus));
slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
seq_printf(s, "%pad: %s\n", &dev->out_ctx->dma,
xhci_decode_slot_context(le32_to_cpu(slot_ctx->dev_info),
xhci_decode_slot_context(str,
le32_to_cpu(slot_ctx->dev_info),
le32_to_cpu(slot_ctx->dev_info2),
le32_to_cpu(slot_ctx->tt_info),
le32_to_cpu(slot_ctx->dev_state)));
@@ -280,14 +283,16 @@ static int xhci_endpoint_context_show(struct seq_file *s, void *unused)
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_priv *priv = s->private;
struct xhci_virt_device *dev = priv->dev;
char str[XHCI_MSG_MAX];

xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus));

for (ep_index = 0; ep_index < 31; ep_index++) {
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
dma = dev->out_ctx->dma + (ep_index + 1) * CTX_SIZE(xhci->hcc_params);
seq_printf(s, "%pad: %s\n", &dma,
xhci_decode_ep_context(le32_to_cpu(ep_ctx->ep_info),
xhci_decode_ep_context(str,
le32_to_cpu(ep_ctx->ep_info),
le32_to_cpu(ep_ctx->ep_info2),
le64_to_cpu(ep_ctx->deq),
le32_to_cpu(ep_ctx->tx_info)));
@@ -341,9 +346,10 @@ static int xhci_portsc_show(struct seq_file *s, void *unused)
{
struct xhci_port *port = s->private;
u32 portsc;
char str[XHCI_MSG_MAX];

portsc = readl(port->addr);
seq_printf(s, "%s\n", xhci_decode_portsc(portsc));
seq_printf(s, "%s\n", xhci_decode_portsc(str, portsc));

return 0;
}
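Each xhci-debugfs show routine above now declares a local char str[XHCI_MSG_MAX] and hands it to the decode helpers, so every caller formats into its own buffer instead of sharing one. The userspace sketch below shows that decode-into-caller-buffer shape; the bit layout and names are illustrative only.

/*
 * Userspace sketch of the decode-into-caller-buffer API shape: the
 * helper writes into storage the caller owns and returns it, so
 * concurrent readers cannot clobber each other's output.
 */
#include <stdio.h>
#include <stdint.h>

#define MSG_MAX 128	/* stand-in for XHCI_MSG_MAX */

static const char *decode_portsc_sketch(char *str, size_t size, uint32_t portsc)
{
	snprintf(str, size, "portsc=0x%08x %s %s", portsc,
		 (portsc & (1u << 0)) ? "connected" : "disconnected",
		 (portsc & (1u << 1)) ? "enabled" : "disabled");
	return str;
}

int main(void)
{
	char str[MSG_MAX];	/* per-caller buffer, e.g. on each reader's stack */

	printf("%s\n", decode_portsc_sketch(str, sizeof(str), 0x00000203));
	return 0;
}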
