diff --git a/COPYING-5.14.0-570.39.1.el9_6 b/COPYING-5.14.0-570.42.2.el9_6 similarity index 100% rename from COPYING-5.14.0-570.39.1.el9_6 rename to COPYING-5.14.0-570.42.2.el9_6 diff --git a/Makefile.rhelver b/Makefile.rhelver index 18fe1a6366562..2bf3b1df48677 100644 --- a/Makefile.rhelver +++ b/Makefile.rhelver @@ -12,7 +12,7 @@ RHEL_MINOR = 6 # # Use this spot to avoid future merge conflicts. # Do not trim this comment. -RHEL_RELEASE = 570.39.1 +RHEL_RELEASE = 570.42.2 # # ZSTREAM diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h index 4c6c6dbd182f4..2a41b91f0ca78 100644 --- a/arch/powerpc/include/asm/mmzone.h +++ b/arch/powerpc/include/asm/mmzone.h @@ -35,6 +35,7 @@ extern cpumask_var_t node_to_cpumask_map[]; #ifdef CONFIG_MEMORY_HOTPLUG extern unsigned long max_pfn; u64 memory_hotplug_max(void); +u64 hot_add_drconf_memory_max(void); #else #define memory_hotplug_max() memblock_end_of_DRAM() #endif diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 8ce57f0527e5d..c36e8fbe5faa7 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -1354,7 +1354,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr) return nid; } -static u64 hot_add_drconf_memory_max(void) +u64 hot_add_drconf_memory_max(void) { struct device_node *memory = NULL; struct device_node *dn = NULL; diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 1b391c7e2e78d..3ac8cd0d3226d 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -52,7 +52,8 @@ enum { enum { DDW_EXT_SIZE = 0, DDW_EXT_RESET_DMA_WIN = 1, - DDW_EXT_QUERY_OUT_SIZE = 2 + DDW_EXT_QUERY_OUT_SIZE = 2, + DDW_EXT_LIMITED_ADDR_MODE = 3 }; static struct iommu_table *iommu_pseries_alloc_table(int node) @@ -196,7 +197,7 @@ static void tce_iommu_userspace_view_free(struct iommu_table *tbl) static void tce_free_pSeries(struct iommu_table *tbl) { - if (!tbl->it_userspace) + if (tbl->it_userspace) tce_iommu_userspace_view_free(tbl); } @@ -1287,17 +1288,13 @@ static LIST_HEAD(failed_ddw_pdn_list); static phys_addr_t ddw_memory_hotplug_max(void) { - resource_size_t max_addr = memory_hotplug_max(); - struct device_node *memory; + resource_size_t max_addr; - for_each_node_by_type(memory, "memory") { - struct resource res; - - if (of_address_to_resource(memory, 0, &res)) - continue; - - max_addr = max_t(resource_size_t, max_addr, res.end + 1); - } +#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG) + max_addr = hot_add_drconf_memory_max(); +#else + max_addr = memblock_end_of_DRAM(); +#endif return max_addr; } @@ -1334,6 +1331,54 @@ static void reset_dma_window(struct pci_dev *dev, struct device_node *par_dn) ret); } +/* + * Platforms support placing the PHB in limited address mode starting with the + * LoPAR level 2.13 implementation. In this mode, the DMA address returned by DDW is over + * 4GB but less than 64 bits. This benefits IO adapters that don't support + * 64-bit DMA addresses.
+ */ +static int limited_dma_window(struct pci_dev *dev, struct device_node *par_dn) +{ + int ret; + u32 cfg_addr, reset_dma_win, las_supported; + u64 buid; + struct device_node *dn; + struct pci_dn *pdn; + + ret = ddw_read_ext(par_dn, DDW_EXT_RESET_DMA_WIN, &reset_dma_win); + if (ret) + goto out; + + ret = ddw_read_ext(par_dn, DDW_EXT_LIMITED_ADDR_MODE, &las_supported); + + /* Limited Address Space extension available on the platform but DDW in + * limited addressing mode not supported + */ + if (!ret && !las_supported) + ret = -EPROTO; + + if (ret) { + dev_info(&dev->dev, "Limited Address Space for DDW not Supported, err: %d", ret); + goto out; + } + + dn = pci_device_to_OF_node(dev); + pdn = PCI_DN(dn); + buid = pdn->phb->buid; + cfg_addr = (pdn->busno << 16) | (pdn->devfn << 8); + + ret = rtas_call(reset_dma_win, 4, 1, NULL, cfg_addr, BUID_HI(buid), + BUID_LO(buid), 1); + if (ret) + dev_info(&dev->dev, + "ibm,reset-pe-dma-windows(%x) for Limited Addr Support: %x %x %x returned %d ", + reset_dma_win, cfg_addr, BUID_HI(buid), BUID_LO(buid), + ret); + +out: + return ret; +} + /* Return largest page shift based on "IO Page Sizes" output of ibm,query-pe-dma-window. */ static int iommu_get_page_shift(u32 query_page_size) { @@ -1401,7 +1446,7 @@ static struct property *ddw_property_create(const char *propname, u32 liobn, u64 * * returns true if can map all pages (direct mapping), false otherwise.. */ -static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) +static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn, u64 dma_mask) { int len = 0, ret; int max_ram_len = order_base_2(ddw_memory_hotplug_max()); @@ -1420,6 +1465,9 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) bool pmem_present; struct pci_dn *pci = PCI_DN(pdn); struct property *default_win = NULL; + bool limited_addr_req = false, limited_addr_enabled = false; + int dev_max_ddw; + int ddw_sz; dn = of_find_node_by_type(NULL, "ibm,pmemory"); pmem_present = dn != NULL; @@ -1446,7 +1494,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) * the ibm,ddw-applicable property holds the tokens for: * ibm,query-pe-dma-window * ibm,create-pe-dma-window - * ibm,remove-pe-dma-window * for the given node in that order. * the property is actually in the parent, not the PE */ @@ -1466,6 +1513,20 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) if (ret != 0) goto out_failed; + /* DMA Limited Addressing required? This is when the driver has + * requested to create a DDW but supports a mask which is less than 64 bits + */ + limited_addr_req = (dma_mask != DMA_BIT_MASK(64)); + + /* place the PHB in Limited Addressing mode */ + if (limited_addr_req) { + if (limited_dma_window(dev, pdn)) + goto out_failed; + + /* PHB is in Limited address mode */ + limited_addr_enabled = true; + } + /* * If there is no window available, remove the default DMA window, * if it's present. This will make all the resources available to the @@ -1512,6 +1573,15 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) goto out_failed; } + /* Maximum DMA window size that the device can address (in log2) */ + dev_max_ddw = fls64(dma_mask); + + /* If the device DMA mask is less than 64 bits, make sure the DMA window + * size is not bigger than what the device can access + */ + ddw_sz = min(order_base_2(query.largest_available_block << page_shift), + dev_max_ddw); + /* * The "ibm,pmemory" can appear anywhere in the address space. 
+ * Assuming it is still backed by page structs, try MAX_PHYSMEM_BITS @@ -1520,23 +1590,21 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) */ len = max_ram_len; if (pmem_present) { - if (query.largest_available_block >= - (1ULL << (MAX_PHYSMEM_BITS - page_shift))) + if (ddw_sz >= MAX_PHYSMEM_BITS) len = MAX_PHYSMEM_BITS; else dev_info(&dev->dev, "Skipping ibm,pmemory"); } /* check if the available block * number of ptes will map everything */ - if (query.largest_available_block < (1ULL << (len - page_shift))) { + if (ddw_sz < len) { dev_dbg(&dev->dev, "can't map partition max 0x%llx with %llu %llu-sized pages\n", 1ULL << len, query.largest_available_block, 1ULL << page_shift); - len = order_base_2(query.largest_available_block << page_shift); - + len = ddw_sz; dynamic_mapping = true; } else { direct_mapping = !default_win_removed || @@ -1550,8 +1618,9 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) */ if (default_win_removed && pmem_present && !direct_mapping) { /* DDW is big enough to be split */ - if ((query.largest_available_block << page_shift) >= - MIN_DDW_VPMEM_DMA_WINDOW + (1ULL << max_ram_len)) { + if ((1ULL << ddw_sz) >= + MIN_DDW_VPMEM_DMA_WINDOW + (1ULL << max_ram_len)) { + direct_mapping = true; /* offset of the Dynamic part of DDW */ @@ -1562,8 +1631,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) dynamic_mapping = true; /* create max size DDW possible */ - len = order_base_2(query.largest_available_block - << page_shift); + len = ddw_sz; } } @@ -1603,7 +1671,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) if (direct_mapping) { /* DDW maps the whole partition, so enable direct DMA mapping */ - ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT, + ret = walk_system_ram_range(0, ddw_memory_hotplug_max() >> PAGE_SHIFT, win64->value, tce_setrange_multi_pSeriesLP_walk); if (ret) { dev_info(&dev->dev, "failed to map DMA window for %pOF: %d\n", @@ -1691,7 +1759,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) __remove_dma_window(pdn, ddw_avail, create.liobn); out_failed: - if (default_win_removed) + if (default_win_removed || limited_addr_enabled) reset_dma_window(dev, pdn); fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL); @@ -1710,6 +1778,9 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset + (1ULL << max_ram_len); + dev_info(&dev->dev, "lsa_required: %x, lsa_enabled: %x, direct mapping: %x\n", + limited_addr_req, limited_addr_enabled, direct_mapping); + return direct_mapping; } @@ -1835,8 +1906,11 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask) { struct device_node *dn = pci_device_to_OF_node(pdev), *pdn; - /* only attempt to use a new window if 64-bit DMA is requested */ + /* For DDW, the DMA mask should be more than 32 bits. For masks more than + * 32 bits but less than 64 bits, DMA addressing is supported in + * Limited Addressing mode. 
+ */ + if (dma_mask <= DMA_BIT_MASK(32)) return false; dev_dbg(&pdev->dev, "node is %pOF\n", dn); @@ -1849,7 +1923,7 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask) */ pdn = pci_dma_find(dn, NULL); if (pdn && PCI_DN(pdn)) - return enable_ddw(pdev, pdn); + return enable_ddw(pdev, pdn, dma_mask); return false; } @@ -2349,11 +2423,17 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action, struct memory_notify *arg = data; int ret = 0; + /* This notifier can get called when onlining persistent memory as well. + * TCEs are not pre-mapped for persistent memory. Persistent memory will + * always be above ddw_memory_hotplug_max() + */ + switch (action) { case MEM_GOING_ONLINE: spin_lock(&dma_win_list_lock); list_for_each_entry(window, &dma_win_list, list) { - if (window->direct) { + if (window->direct && (arg->start_pfn << PAGE_SHIFT) < + ddw_memory_hotplug_max()) { ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn, arg->nr_pages, window->prop); } @@ -2365,7 +2445,8 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action, case MEM_OFFLINE: spin_lock(&dma_win_list_lock); list_for_each_entry(window, &dma_win_list, list) { - if (window->direct) { + if (window->direct && (arg->start_pfn << PAGE_SHIFT) < + ddw_memory_hotplug_max()) { ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn, arg->nr_pages, window->prop); } diff --git a/ciq/ciq_backports/kernel-5.14.0-570.41.1.el9_6/94487653.failed b/ciq/ciq_backports/kernel-5.14.0-570.41.1.el9_6/94487653.failed new file mode 100644 index 0000000000000..f6a2b0f1b3aae --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.41.1.el9_6/94487653.failed @@ -0,0 +1,130 @@ +smb: convert to ctime accessor functions + +jira LE-4159 +Rebuild_History Non-Buildable kernel-5.14.0-570.41.1.el9_6 +commit-author Jeff Layton +commit 9448765397b6e6522dff485f4b480b7ee020a536 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.41.1.el9_6/94487653.failed + +In later patches, we're going to change how the inode's ctime field is +used. Switch to using accessor functions instead of raw accesses of +inode->i_ctime. + + Acked-by: Tom Talpey + Reviewed-by: Sergey Senozhatsky + Signed-off-by: Jeff Layton + Acked-by: Steve French +Message-Id: <20230705190309.579783-72-jlayton@kernel.org> + Signed-off-by: Christian Brauner +(cherry picked from commit 9448765397b6e6522dff485f4b480b7ee020a536) + Signed-off-by: Jonathan Maple + +# Conflicts: +# fs/smb/server/smb2pdu.c +* Unmerged path fs/smb/server/smb2pdu.c +diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c +index 254347d16db6..21a887f3a36f 100644 +--- a/fs/smb/client/file.c ++++ b/fs/smb/client/file.c +@@ -1066,7 +1066,7 @@ int cifs_close(struct inode *inode, struct file *file) + if ((cfile->status_file_deleted == false) && + (smb2_can_defer_close(inode, dclose))) { + if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) { +- inode->i_ctime = inode->i_mtime = current_time(inode); ++ inode->i_mtime = inode_set_ctime_current(inode); + } + spin_lock(&cinode->deferred_lock); + cifs_add_deferred_close(cfile, dclose); +@@ -2634,7 +2634,7 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) + write_data, to - from, &offset); + cifsFileInfo_put(open_file); + /* Does mm or vfs already set times? 
*/ +- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); ++ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode); + if ((bytes_written > 0) && (offset)) + rc = 0; + else if (bytes_written < 0) +diff --git a/fs/smb/client/fscache.h b/fs/smb/client/fscache.h +index c691b98b442a..82e7f7a15491 100644 +--- a/fs/smb/client/fscache.h ++++ b/fs/smb/client/fscache.h +@@ -50,12 +50,13 @@ void cifs_fscache_fill_coherency(struct inode *inode, + struct cifs_fscache_inode_coherency_data *cd) + { + struct cifsInodeInfo *cifsi = CIFS_I(inode); ++ struct timespec64 ctime = inode_get_ctime(inode); + + memset(cd, 0, sizeof(*cd)); + cd->last_write_time_sec = cpu_to_le64(cifsi->netfs.inode.i_mtime.tv_sec); + cd->last_write_time_nsec = cpu_to_le32(cifsi->netfs.inode.i_mtime.tv_nsec); +- cd->last_change_time_sec = cpu_to_le64(cifsi->netfs.inode.i_ctime.tv_sec); +- cd->last_change_time_nsec = cpu_to_le32(cifsi->netfs.inode.i_ctime.tv_nsec); ++ cd->last_change_time_sec = cpu_to_le64(ctime.tv_sec); ++ cd->last_change_time_nsec = cpu_to_le32(ctime.tv_nsec); + } + + +diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c +index 83d1b0b9f5c1..2c38d3214c78 100644 +--- a/fs/smb/client/inode.c ++++ b/fs/smb/client/inode.c +@@ -170,7 +170,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr, + else + inode->i_atime = fattr->cf_atime; + inode->i_mtime = fattr->cf_mtime; +- inode->i_ctime = fattr->cf_ctime; ++ inode_set_ctime_to_ts(inode, fattr->cf_ctime); + inode->i_rdev = fattr->cf_rdev; + cifs_nlink_fattr_to_inode(inode, fattr); + inode->i_uid = fattr->cf_uid; +@@ -2004,9 +2004,9 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry) + cifs_inode = CIFS_I(inode); + cifs_inode->time = 0; /* will force revalidate to get info + when needed */ +- inode->i_ctime = current_time(inode); ++ inode_set_ctime_current(inode); + } +- dir->i_ctime = dir->i_mtime = current_time(dir); ++ dir->i_mtime = inode_set_ctime_current(dir); + cifs_inode = CIFS_I(dir); + CIFS_I(dir)->time = 0; /* force revalidate of dir as well */ + unlink_out: +@@ -2322,8 +2322,8 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry) + */ + cifsInode->time = 0; + +- d_inode(direntry)->i_ctime = inode->i_ctime = inode->i_mtime = +- current_time(inode); ++ inode_set_ctime_current(d_inode(direntry)); ++ inode->i_mtime = inode_set_ctime_current(inode); + + rmdir_exit: + free_dentry_path(page); +@@ -2537,8 +2537,8 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir, + /* force revalidate to go get info when needed */ + CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0; + +- source_dir->i_ctime = source_dir->i_mtime = target_dir->i_ctime = +- target_dir->i_mtime = current_time(source_dir); ++ source_dir->i_mtime = target_dir->i_mtime = inode_set_ctime_to_ts(source_dir, ++ inode_set_ctime_current(target_dir)); + + cifs_rename_exit: + kfree(info_buf_source); +diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c +index ecd0b5c79c22..07283c98c575 100644 +--- a/fs/smb/client/smb2ops.c ++++ b/fs/smb/client/smb2ops.c +@@ -1443,7 +1443,8 @@ smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon, + if (file_inf.LastWriteTime) + inode->i_mtime = cifs_NTtimeToUnix(file_inf.LastWriteTime); + if (file_inf.ChangeTime) +- inode->i_ctime = cifs_NTtimeToUnix(file_inf.ChangeTime); ++ inode_set_ctime_to_ts(inode, ++ cifs_NTtimeToUnix(file_inf.ChangeTime)); + if (file_inf.LastAccessTime) + inode->i_atime = cifs_NTtimeToUnix(file_inf.LastAccessTime); + +* Unmerged path 
fs/smb/server/smb2pdu.c diff --git a/ciq/ciq_backports/kernel-5.14.0-570.41.1.el9_6/bcd6e41d.failed b/ciq/ciq_backports/kernel-5.14.0-570.41.1.el9_6/bcd6e41d.failed new file mode 100644 index 0000000000000..93b26bdc49431 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.41.1.el9_6/bcd6e41d.failed @@ -0,0 +1,195 @@ +crypto: engine - Remove prepare/unprepare request + +jira LE-4159 +Rebuild_History Non-Buildable kernel-5.14.0-570.41.1.el9_6 +commit-author Herbert Xu +commit bcd6e41d983621954dfc3f1f64249a55838b3e6a +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-5.14.0-570.41.1.el9_6/bcd6e41d.failed + +The callbacks for prepare and unprepare request in crypto_engine +is superfluous. They can be done directly from do_one_request. + +Move the code into do_one_request and remove the unused callbacks. + + Signed-off-by: Herbert Xu +(cherry picked from commit bcd6e41d983621954dfc3f1f64249a55838b3e6a) + Signed-off-by: Jonathan Maple + +# Conflicts: +# crypto/crypto_engine.c +# include/crypto/engine.h +diff --cc crypto/crypto_engine.c +index 5c4b4f02100a,17f7955500a0..000000000000 +--- a/crypto/crypto_engine.c ++++ b/crypto/crypto_engine.c +@@@ -59,16 -40,8 +55,21 @@@ static void crypto_finalize_request(str + spin_unlock_irqrestore(&engine->queue_lock, flags); + } + +++<<<<<<< HEAD + + if (finalize_req || engine->retry_support) { + + enginectx = crypto_tfm_ctx(req->tfm); + + if (enginectx->op.prepare_request && + + enginectx->op.unprepare_request) { + + ret = enginectx->op.unprepare_request(engine, req); + + if (ret) + + dev_err(engine->dev, "failed to unprepare request\n"); + + } + + } + + req->complete(req, err); +++======= ++ lockdep_assert_in_softirq(); ++ crypto_request_complete(req, err); +++>>>>>>> bcd6e41d9836 (crypto: engine - Remove prepare/unprepare request) + + kthread_queue_work(engine->kworker, &engine->pump_requests); + } +@@@ -164,30 -132,15 +165,37 @@@ start_request + } + } + + - enginectx = crypto_tfm_ctx(async_req->tfm); + - + + if (async_req->tfm->__crt_alg->cra_flags & CRYPTO_ALG_ENGINE) { + + alg = container_of(async_req->tfm->__crt_alg, + + struct crypto_engine_alg, base); + + op = &alg->op; + + } else { + + enginectx = crypto_tfm_ctx(async_req->tfm); + + op = &enginectx->op; + + +++<<<<<<< HEAD + + if (op->prepare_request) { + + ret = op->prepare_request(engine, async_req); + + if (ret) { + + dev_err(engine->dev, "failed to prepare request: %d\n", + + ret); + + goto req_err_2; + + } + + } + + if (!op->do_one_request) { + + dev_err(engine->dev, "failed to do request\n"); + + ret = -EINVAL; + + goto req_err_1; + + } +++======= ++ if (!enginectx->op.do_one_request) { ++ dev_err(engine->dev, "failed to do request\n"); ++ ret = -EINVAL; ++ goto req_err_1; +++>>>>>>> bcd6e41d9836 (crypto: engine - Remove prepare/unprepare request) + } + + - ret = enginectx->op.do_one_request(engine, async_req); + + ret = op->do_one_request(engine, async_req); + + /* Request unsuccessfully executed by hardware */ + if (ret < 0) { +@@@ -230,16 -171,12 +226,20 @@@ + goto retry; + + req_err_1: +++<<<<<<< HEAD + + if (enginectx->op.unprepare_request) { + + ret = enginectx->op.unprepare_request(engine, async_req); + + if (ret) + + dev_err(engine->dev, "failed to unprepare request\n"); + + } + + + +req_err_2: + + async_req->complete(async_req, ret); +++======= ++ crypto_request_complete(async_req, ret); +++>>>>>>> bcd6e41d9836 (crypto: engine - Remove prepare/unprepare request) + + retry: 
+ - if (backlog) + - crypto_request_complete(backlog, -EINPROGRESS); + - + /* If retry mechanism is supported, send new requests to engine */ + if (engine->retry_support) { + spin_lock_irqsave(&engine->queue_lock, flags); +diff --cc include/crypto/engine.h +index 7db1bae33021,1b02f69e0a79..000000000000 +--- a/include/crypto/engine.h ++++ b/include/crypto/engine.h +@@@ -10,17 -10,74 +10,20 @@@ + #include + #include + #include + -#include + #include + +#include + +#include + + +struct crypto_engine; + struct device; + + -#define ENGINE_NAME_LEN 30 + -/* + - * struct crypto_engine - crypto hardware engine + - * @name: the engine name + - * @idling: the engine is entering idle state + - * @busy: request pump is busy + - * @running: the engine is on working + - * @retry_support: indication that the hardware allows re-execution + - * of a failed backlog request + - * crypto-engine, in head position to keep order + - * @list: link with the global crypto engine list + - * @queue_lock: spinlock to synchronise access to request queue + - * @queue: the crypto queue of the engine + - * @rt: whether this queue is set to run as a realtime task + - * @prepare_crypt_hardware: a request will soon arrive from the queue + - * so the subsystem requests the driver to prepare the hardware + - * by issuing this call + - * @unprepare_crypt_hardware: there are currently no more requests on the + - * queue so the subsystem notifies the driver that it may relax the + - * hardware by issuing this call + - * @do_batch_requests: execute a batch of requests. Depends on multiple + - * requests support. + - * @kworker: kthread worker struct for request pump + - * @pump_requests: work struct for scheduling work to the request pump + - * @priv_data: the engine private data + - * @cur_req: the current request which is on processing + - */ + -struct crypto_engine { + - char name[ENGINE_NAME_LEN]; + - bool idling; + - bool busy; + - bool running; + - + - bool retry_support; + - + - struct list_head list; + - spinlock_t queue_lock; + - struct crypto_queue queue; + - struct device *dev; + - + - bool rt; + - + - int (*prepare_crypt_hardware)(struct crypto_engine *engine); + - int (*unprepare_crypt_hardware)(struct crypto_engine *engine); + - int (*do_batch_requests)(struct crypto_engine *engine); + - + - + - struct kthread_worker *kworker; + - struct kthread_work pump_requests; + - + - void *priv_data; + - struct crypto_async_request *cur_req; + -}; + - + /* + * struct crypto_engine_op - crypto hardware engine operations +++<<<<<<< HEAD + + * @prepare__request: do some prepare if need before handle the current request + + * @unprepare_request: undo any work done by prepare_request() +++======= +++>>>>>>> bcd6e41d9836 (crypto: engine - Remove prepare/unprepare request) + * @do_one_request: do encryption for current request + */ + struct crypto_engine_op { +* Unmerged path crypto/crypto_engine.c +* Unmerged path include/crypto/engine.h diff --git a/ciq/ciq_backports/kernel-5.14.0-570.41.1.el9_6/rebuild.details.txt b/ciq/ciq_backports/kernel-5.14.0-570.41.1.el9_6/rebuild.details.txt new file mode 100644 index 0000000000000..444321349379a --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.41.1.el9_6/rebuild.details.txt @@ -0,0 +1,20 @@ +Rebuild_History BUILDABLE +Rebuilding Kernel from rpm changelog with Fuzz Limit: 87.50% +Number of commits in upstream range v5.14~1..kernel-mainline: 324124 +Number of commits in rpm: 26 +Number of commits matched with upstream: 24 (92.31%) +Number of commits in upstream but not in rpm: 324100 
+Number of commits NOT found in upstream: 2 (7.69%) + +Rebuilding Kernel on Branch rocky9_6_rebuild_kernel-5.14.0-570.41.1.el9_6 for kernel-5.14.0-570.41.1.el9_6 +Clean Cherry Picks: 22 (91.67%) +Empty Cherry Picks: 2 (8.33%) +_______________________________ + +__EMPTY COMMITS__________________________ +bcd6e41d983621954dfc3f1f64249a55838b3e6a crypto: engine - Remove prepare/unprepare request +9448765397b6e6522dff485f4b480b7ee020a536 smb: convert to ctime accessor functions + +__CHANGES NOT IN UPSTREAM________________ +Porting to Rocky Linux 9, debranding and Rocky branding' +Ensure aarch64 kernel is not compressed' diff --git a/ciq/ciq_backports/kernel-5.14.0-570.42.2.el9_6/rebuild.details.txt b/ciq/ciq_backports/kernel-5.14.0-570.42.2.el9_6/rebuild.details.txt new file mode 100644 index 0000000000000..5354592f923d3 --- /dev/null +++ b/ciq/ciq_backports/kernel-5.14.0-570.42.2.el9_6/rebuild.details.txt @@ -0,0 +1,18 @@ +Rebuild_History BUILDABLE +Rebuilding Kernel from rpm changelog with Fuzz Limit: 87.50% +Number of commits in upstream range v5.14~1..kernel-mainline: 324124 +Number of commits in rpm: 9 +Number of commits matched with upstream: 7 (77.78%) +Number of commits in upstream but not in rpm: 324117 +Number of commits NOT found in upstream: 2 (22.22%) + +Rebuilding Kernel on Branch rocky9_6_rebuild_kernel-5.14.0-570.42.2.el9_6 for kernel-5.14.0-570.42.2.el9_6 +Clean Cherry Picks: 7 (100.00%) +Empty Cherry Picks: 0 (0.00%) +_______________________________ + +__EMPTY COMMITS__________________________ + +__CHANGES NOT IN UPSTREAM________________ +Porting to Rocky Linux 9, debranding and Rocky branding' +Ensure aarch64 kernel is not compressed' diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index 5c4b4f02100af..053fa8e0577ad 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c @@ -41,9 +41,6 @@ static void crypto_finalize_request(struct crypto_engine *engine, struct crypto_async_request *req, int err) { unsigned long flags; - bool finalize_req = false; - int ret; - struct crypto_engine_ctx *enginectx; /* * If hardware cannot enqueue more requests @@ -53,21 +50,11 @@ static void crypto_finalize_request(struct crypto_engine *engine, if (!engine->retry_support) { spin_lock_irqsave(&engine->queue_lock, flags); if (engine->cur_req == req) { - finalize_req = true; engine->cur_req = NULL; } spin_unlock_irqrestore(&engine->queue_lock, flags); } - if (finalize_req || engine->retry_support) { - enginectx = crypto_tfm_ctx(req->tfm); - if (enginectx->op.prepare_request && - enginectx->op.unprepare_request) { - ret = enginectx->op.unprepare_request(engine, req); - if (ret) - dev_err(engine->dev, "failed to unprepare request\n"); - } - } req->complete(req, err); kthread_queue_work(engine->kworker, &engine->pump_requests); @@ -160,7 +147,7 @@ static void crypto_pump_requests(struct crypto_engine *engine, ret = engine->prepare_crypt_hardware(engine); if (ret) { dev_err(engine->dev, "failed to prepare crypt hardware\n"); - goto req_err_2; + goto req_err_1; } } @@ -170,16 +157,7 @@ static void crypto_pump_requests(struct crypto_engine *engine, op = &alg->op; } else { enginectx = crypto_tfm_ctx(async_req->tfm); - op = &enginectx->op; - - if (op->prepare_request) { - ret = op->prepare_request(engine, async_req); - if (ret) { - dev_err(engine->dev, "failed to prepare request: %d\n", - ret); - goto req_err_2; - } - } + if (!op->do_one_request) { dev_err(engine->dev, "failed to do request\n"); ret = -EINVAL; @@ -203,18 +181,6 @@ static void crypto_pump_requests(struct crypto_engine 
*engine, ret); goto req_err_1; } - /* - * If retry mechanism is supported, - * unprepare current request and - * enqueue it back into crypto-engine queue. - */ - if (enginectx->op.unprepare_request) { - ret = enginectx->op.unprepare_request(engine, - async_req); - if (ret) - dev_err(engine->dev, - "failed to unprepare request\n"); - } spin_lock_irqsave(&engine->queue_lock, flags); /* * If hardware was unable to execute request, enqueue it @@ -230,13 +196,6 @@ static void crypto_pump_requests(struct crypto_engine *engine, goto retry; req_err_1: - if (enginectx->op.unprepare_request) { - ret = enginectx->op.unprepare_request(engine, async_req); - if (ret) - dev_err(engine->dev, "failed to unprepare request\n"); - } - -req_err_2: async_req->complete(async_req, ret); retry: diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c index ae7a0f8435fc6..0e07d0523291a 100644 --- a/drivers/crypto/tegra/tegra-se-aes.c +++ b/drivers/crypto/tegra/tegra-se-aes.c @@ -28,6 +28,9 @@ struct tegra_aes_ctx { u32 ivsize; u32 key1_id; u32 key2_id; + u32 keylen; + u8 key1[AES_MAX_KEY_SIZE]; + u8 key2[AES_MAX_KEY_SIZE]; }; struct tegra_aes_reqctx { @@ -43,8 +46,9 @@ struct tegra_aead_ctx { struct tegra_se *se; unsigned int authsize; u32 alg; - u32 keylen; u32 key_id; + u32 keylen; + u8 key[AES_MAX_KEY_SIZE]; }; struct tegra_aead_reqctx { @@ -56,8 +60,8 @@ struct tegra_aead_reqctx { unsigned int cryptlen; unsigned int authsize; bool encrypt; - u32 config; u32 crypto_config; + u32 config; u32 key_id; u32 iv[4]; u8 authdata[16]; @@ -67,6 +71,8 @@ struct tegra_cmac_ctx { struct tegra_se *se; unsigned int alg; u32 key_id; + u32 keylen; + u8 key[AES_MAX_KEY_SIZE]; struct crypto_shash *fallback_tfm; }; @@ -260,17 +266,13 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq) struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req); struct tegra_se *se = ctx->se; - unsigned int cmdlen; + unsigned int cmdlen, key1_id, key2_id; int ret; - rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_AES_BUFLEN, - &rctx->datbuf.addr, GFP_KERNEL); - if (!rctx->datbuf.buf) - return -ENOMEM; - - rctx->datbuf.size = SE_AES_BUFLEN; - rctx->iv = (u32 *)req->iv; + rctx->iv = (ctx->alg == SE_ALG_ECB) ? 
NULL : (u32 *)req->iv; rctx->len = req->cryptlen; + key1_id = ctx->key1_id; + key2_id = ctx->key2_id; /* Pad input to AES Block size */ if (ctx->alg != SE_ALG_XTS) { @@ -278,20 +280,59 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq) rctx->len += AES_BLOCK_SIZE - (rctx->len % AES_BLOCK_SIZE); } + rctx->datbuf.size = rctx->len; + rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) { + ret = -ENOMEM; + goto out_finalize; + } + scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0); + rctx->config = tegra234_aes_cfg(ctx->alg, rctx->encrypt); + rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, rctx->encrypt); + + if (!key1_id) { + ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key1, + ctx->keylen, ctx->alg, &key1_id); + if (ret) + goto out; + } + + rctx->crypto_config |= SE_AES_KEY_INDEX(key1_id); + + if (ctx->alg == SE_ALG_XTS) { + if (!key2_id) { + ret = tegra_key_submit_reserved_xts(ctx->se, ctx->key2, + ctx->keylen, ctx->alg, &key2_id); + if (ret) + goto out; + } + + rctx->crypto_config |= SE_AES_KEY2_INDEX(key2_id); + } + /* Prepare the command and submit for execution */ cmdlen = tegra_aes_prep_cmd(ctx, rctx); - ret = tegra_se_host1x_submit(se, cmdlen); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); /* Copy the result */ tegra_aes_update_iv(req, ctx); scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1); +out: /* Free the buffer */ - dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, + dma_free_coherent(ctx->se->dev, rctx->datbuf.size, rctx->datbuf.buf, rctx->datbuf.addr); + if (tegra_key_is_reserved(key1_id)) + tegra_key_invalidate_reserved(ctx->se, key1_id, ctx->alg); + + if (tegra_key_is_reserved(key2_id)) + tegra_key_invalidate_reserved(ctx->se, key2_id, ctx->alg); + +out_finalize: crypto_finalize_skcipher_request(se->engine, req, ret); return 0; @@ -313,6 +354,7 @@ static int tegra_aes_cra_init(struct crypto_skcipher *tfm) ctx->se = se_alg->se_dev; ctx->key1_id = 0; ctx->key2_id = 0; + ctx->keylen = 0; algname = crypto_tfm_alg_name(&tfm->base); ret = se_algname_to_algid(algname); @@ -341,13 +383,20 @@ static int tegra_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, u32 keylen) { struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm); + int ret; if (aes_check_keylen(keylen)) { dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen); return -EINVAL; } - return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id); + ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id); + if (ret) { + ctx->keylen = keylen; + memcpy(ctx->key1, key, keylen); + } + + return 0; } static int tegra_xts_setkey(struct crypto_skcipher *tfm, @@ -365,11 +414,17 @@ static int tegra_xts_setkey(struct crypto_skcipher *tfm, ret = tegra_key_submit(ctx->se, key, len, ctx->alg, &ctx->key1_id); - if (ret) - return ret; + if (ret) { + ctx->keylen = len; + memcpy(ctx->key1, key, len); + } - return tegra_key_submit(ctx->se, key + len, len, + ret = tegra_key_submit(ctx->se, key + len, len, ctx->alg, &ctx->key2_id); + if (ret) { + ctx->keylen = len; + memcpy(ctx->key2, key + len, len); + } return 0; } @@ -444,12 +499,6 @@ static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt) return 0; rctx->encrypt = encrypt; - rctx->config = tegra234_aes_cfg(ctx->alg, encrypt); - rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, encrypt); - rctx->crypto_config |= SE_AES_KEY_INDEX(ctx->key1_id); - - if (ctx->key2_id) - 
rctx->crypto_config |= SE_AES_KEY2_INDEX(ctx->key2_id); return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req); } @@ -715,11 +764,11 @@ static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqct rctx->config = tegra234_aes_cfg(SE_ALG_GMAC, rctx->encrypt); rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GMAC, rctx->encrypt) | - SE_AES_KEY_INDEX(ctx->key_id); + SE_AES_KEY_INDEX(rctx->key_id); cmdlen = tegra_gmac_prep_cmd(ctx, rctx); - return tegra_se_host1x_submit(se, cmdlen); + return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); } static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx) @@ -732,11 +781,11 @@ static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc rctx->config = tegra234_aes_cfg(SE_ALG_GCM, rctx->encrypt); rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM, rctx->encrypt) | - SE_AES_KEY_INDEX(ctx->key_id); + SE_AES_KEY_INDEX(rctx->key_id); /* Prepare command and submit */ cmdlen = tegra_gcm_crypt_prep_cmd(ctx, rctx); - ret = tegra_se_host1x_submit(se, cmdlen); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); if (ret) return ret; @@ -755,11 +804,11 @@ static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc rctx->config = tegra234_aes_cfg(SE_ALG_GCM_FINAL, rctx->encrypt); rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM_FINAL, rctx->encrypt) | - SE_AES_KEY_INDEX(ctx->key_id); + SE_AES_KEY_INDEX(rctx->key_id); /* Prepare command and submit */ cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx); - ret = tegra_se_host1x_submit(se, cmdlen); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); if (ret) return ret; @@ -886,12 +935,12 @@ static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_req rctx->config = tegra234_aes_cfg(SE_ALG_CBC_MAC, rctx->encrypt); rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CBC_MAC, rctx->encrypt) | - SE_AES_KEY_INDEX(ctx->key_id); + SE_AES_KEY_INDEX(rctx->key_id); /* Prepare command and submit */ cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx); - return tegra_se_host1x_submit(se, cmdlen); + return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); } static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize) @@ -1073,7 +1122,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx rctx->config = tegra234_aes_cfg(SE_ALG_CTR, rctx->encrypt); rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CTR, rctx->encrypt) | - SE_AES_KEY_INDEX(ctx->key_id); + SE_AES_KEY_INDEX(rctx->key_id); /* Copy authdata in the top of buffer for encryption/decryption */ if (rctx->encrypt) @@ -1098,7 +1147,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx /* Prepare command and submit */ cmdlen = tegra_ctr_prep_cmd(ctx, rctx); - ret = tegra_se_host1x_submit(se, cmdlen); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); if (ret) return ret; @@ -1117,6 +1166,11 @@ static int tegra_ccm_crypt_init(struct aead_request *req, struct tegra_se *se, rctx->assoclen = req->assoclen; rctx->authsize = crypto_aead_authsize(tfm); + if (rctx->encrypt) + rctx->cryptlen = req->cryptlen; + else + rctx->cryptlen = req->cryptlen - rctx->authsize; + memcpy(iv, req->iv, 16); ret = tegra_ccm_check_iv(iv); @@ -1145,30 +1199,35 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq) struct tegra_se *se = ctx->se; int ret; + ret = tegra_ccm_crypt_init(req, se, rctx); + if (ret) + goto out_finalize; + + rctx->key_id = 
ctx->key_id; + /* Allocate buffers required */ - rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN, + rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100; + rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size, &rctx->inbuf.addr, GFP_KERNEL); if (!rctx->inbuf.buf) - return -ENOMEM; - - rctx->inbuf.size = SE_AES_BUFLEN; + goto out_finalize; - rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN, + rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100; + rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size, &rctx->outbuf.addr, GFP_KERNEL); if (!rctx->outbuf.buf) { ret = -ENOMEM; - goto outbuf_err; + goto out_free_inbuf; } - rctx->outbuf.size = SE_AES_BUFLEN; - - ret = tegra_ccm_crypt_init(req, se, rctx); - if (ret) - goto out; + if (!ctx->key_id) { + ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key, + ctx->keylen, ctx->alg, &rctx->key_id); + if (ret) + goto out; + } if (rctx->encrypt) { - rctx->cryptlen = req->cryptlen; - /* CBC MAC Operation */ ret = tegra_ccm_compute_auth(ctx, rctx); if (ret) @@ -1179,10 +1238,6 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq) if (ret) goto out; } else { - rctx->cryptlen = req->cryptlen - ctx->authsize; - if (ret) - goto out; - /* CTR operation */ ret = tegra_ccm_do_ctr(ctx, rctx); if (ret) @@ -1195,13 +1250,17 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq) } out: - dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, + dma_free_coherent(ctx->se->dev, rctx->inbuf.size, rctx->outbuf.buf, rctx->outbuf.addr); -outbuf_err: - dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, +out_free_inbuf: + dma_free_coherent(ctx->se->dev, rctx->outbuf.size, rctx->inbuf.buf, rctx->inbuf.addr); + if (tegra_key_is_reserved(rctx->key_id)) + tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg); + +out_finalize: crypto_finalize_aead_request(ctx->se->engine, req, ret); return 0; @@ -1215,23 +1274,6 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq) struct tegra_aead_reqctx *rctx = aead_request_ctx(req); int ret; - /* Allocate buffers required */ - rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN, - &rctx->inbuf.addr, GFP_KERNEL); - if (!rctx->inbuf.buf) - return -ENOMEM; - - rctx->inbuf.size = SE_AES_BUFLEN; - - rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN, - &rctx->outbuf.addr, GFP_KERNEL); - if (!rctx->outbuf.buf) { - ret = -ENOMEM; - goto outbuf_err; - } - - rctx->outbuf.size = SE_AES_BUFLEN; - rctx->src_sg = req->src; rctx->dst_sg = req->dst; rctx->assoclen = req->assoclen; @@ -1245,6 +1287,32 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq) memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE); rctx->iv[3] = (1 << 24); + rctx->key_id = ctx->key_id; + + /* Allocate buffers required */ + rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen; + rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size, + &rctx->inbuf.addr, GFP_KERNEL); + if (!rctx->inbuf.buf) { + ret = -ENOMEM; + goto out_finalize; + } + + rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen; + rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size, + &rctx->outbuf.addr, GFP_KERNEL); + if (!rctx->outbuf.buf) { + ret = -ENOMEM; + goto out_free_inbuf; + } + + if (!ctx->key_id) { + ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key, + ctx->keylen, ctx->alg, &rctx->key_id); + if (ret) + goto out; + } + /* If there is 
associated data perform GMAC operation */ if (rctx->assoclen) { ret = tegra_gcm_do_gmac(ctx, rctx); @@ -1268,14 +1336,17 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq) ret = tegra_gcm_do_verify(ctx->se, rctx); out: - dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, + dma_free_coherent(ctx->se->dev, rctx->outbuf.size, rctx->outbuf.buf, rctx->outbuf.addr); -outbuf_err: - dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, +out_free_inbuf: + dma_free_coherent(ctx->se->dev, rctx->inbuf.size, rctx->inbuf.buf, rctx->inbuf.addr); - /* Finalize the request if there are no errors */ + if (tegra_key_is_reserved(rctx->key_id)) + tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg); + +out_finalize: crypto_finalize_aead_request(ctx->se->engine, req, ret); return 0; @@ -1297,6 +1368,7 @@ static int tegra_aead_cra_init(struct crypto_aead *tfm) ctx->se = se_alg->se_dev; ctx->key_id = 0; + ctx->keylen = 0; ret = se_algname_to_algid(algname); if (ret < 0) { @@ -1378,13 +1450,20 @@ static int tegra_aead_setkey(struct crypto_aead *tfm, const u8 *key, u32 keylen) { struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm); + int ret; if (aes_check_keylen(keylen)) { dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen); return -EINVAL; } - return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); + ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); + if (ret) { + ctx->keylen = keylen; + memcpy(ctx->key, key, keylen); + } + + return 0; } static unsigned int tegra_cmac_prep_cmd(struct tegra_cmac_ctx *ctx, @@ -1458,6 +1537,35 @@ static void tegra_cmac_paste_result(struct tegra_se *se, struct tegra_cmac_reqct se->base + se->hw->regs->result + (i * 4)); } +static int tegra_cmac_do_init(struct ahash_request *req) +{ + struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_se *se = ctx->se; + int i; + + rctx->total_len = 0; + rctx->datbuf.size = 0; + rctx->residue.size = 0; + rctx->key_id = ctx->key_id; + rctx->task |= SHA_FIRST; + rctx->blk_size = crypto_ahash_blocksize(tfm); + + rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2, + &rctx->residue.addr, GFP_KERNEL); + if (!rctx->residue.buf) + return -ENOMEM; + + rctx->residue.size = 0; + + /* Clear any previous result */ + for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) + writel(0, se->base + se->hw->regs->result + (i * 4)); + + return 0; +} + static int tegra_cmac_do_update(struct ahash_request *req) { struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); @@ -1485,7 +1593,7 @@ static int tegra_cmac_do_update(struct ahash_request *req) rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue; rctx->total_len += rctx->datbuf.size; rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0); - rctx->crypto_config = SE_AES_KEY_INDEX(ctx->key_id); + rctx->crypto_config = SE_AES_KEY_INDEX(rctx->key_id); /* * Keep one block and residue bytes in residue and @@ -1499,6 +1607,11 @@ static int tegra_cmac_do_update(struct ahash_request *req) return 0; } + rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) + return -ENOMEM; + /* Copy the previous residue first */ if (rctx->residue.size) memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); @@ -1513,23 +1626,19 @@ static int tegra_cmac_do_update(struct ahash_request *req) rctx->residue.size = nresidue; /* - * If this is not the first 'update' 
call, paste the previous copied + * If this is not the first task, paste the previous copied * intermediate results to the registers so that it gets picked up. - * This is to support the import/export functionality. */ if (!(rctx->task & SHA_FIRST)) tegra_cmac_paste_result(ctx->se, rctx); cmdlen = tegra_cmac_prep_cmd(ctx, rctx); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); - ret = tegra_se_host1x_submit(se, cmdlen); - /* - * If this is not the final update, copy the intermediate results - * from the registers so that it can be used in the next 'update' - * call. This is to support the import/export functionality. - */ - if (!(rctx->task & SHA_FINAL)) - tegra_cmac_copy_result(ctx->se, rctx); + tegra_cmac_copy_result(ctx->se, rctx); + + dma_free_coherent(ctx->se->dev, rctx->datbuf.size, + rctx->datbuf.buf, rctx->datbuf.addr); return ret; } @@ -1545,17 +1654,34 @@ static int tegra_cmac_do_final(struct ahash_request *req) if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) { return crypto_shash_tfm_digest(ctx->fallback_tfm, - rctx->datbuf.buf, 0, req->result); + NULL, 0, req->result); + } + + if (rctx->residue.size) { + rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) { + ret = -ENOMEM; + goto out_free; + } + + memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); } - memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); rctx->datbuf.size = rctx->residue.size; rctx->total_len += rctx->residue.size; rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0); + /* + * If this is not the first task, paste the previous copied + * intermediate results to the registers so that it gets picked up. + */ + if (!(rctx->task & SHA_FIRST)) + tegra_cmac_paste_result(ctx->se, rctx); + /* Prepare command and submit */ cmdlen = tegra_cmac_prep_cmd(ctx, rctx); - ret = tegra_se_host1x_submit(se, cmdlen); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); if (ret) goto out; @@ -1567,8 +1693,10 @@ static int tegra_cmac_do_final(struct ahash_request *req) writel(0, se->base + se->hw->regs->result + (i * 4)); out: - dma_free_coherent(se->dev, SE_SHA_BUFLEN, - rctx->datbuf.buf, rctx->datbuf.addr); + if (rctx->residue.size) + dma_free_coherent(se->dev, rctx->datbuf.size, + rctx->datbuf.buf, rctx->datbuf.addr); +out_free: dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2, rctx->residue.buf, rctx->residue.addr); return ret; @@ -1581,17 +1709,41 @@ static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); struct tegra_se *se = ctx->se; - int ret; + int ret = 0; + + if (rctx->task & SHA_INIT) { + ret = tegra_cmac_do_init(req); + if (ret) + goto out; + + rctx->task &= ~SHA_INIT; + } + + if (!ctx->key_id) { + ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key, + ctx->keylen, ctx->alg, &rctx->key_id); + if (ret) + goto out; + } if (rctx->task & SHA_UPDATE) { ret = tegra_cmac_do_update(req); + if (ret) + goto out; + rctx->task &= ~SHA_UPDATE; } if (rctx->task & SHA_FINAL) { ret = tegra_cmac_do_final(req); + if (ret) + goto out; + rctx->task &= ~SHA_FINAL; } +out: + if (tegra_key_is_reserved(rctx->key_id)) + tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg); crypto_finalize_hash_request(se->engine, req, ret); @@ -1633,6 +1785,7 @@ static int tegra_cmac_cra_init(struct crypto_tfm *tfm) ctx->se = se_alg->se_dev; ctx->key_id = 0; + ctx->keylen = 0; ret = 
se_algname_to_algid(algname); if (ret < 0) { @@ -1657,51 +1810,11 @@ static void tegra_cmac_cra_exit(struct crypto_tfm *tfm) tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg); } -static int tegra_cmac_init(struct ahash_request *req) -{ - struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); - struct tegra_se *se = ctx->se; - int i; - - rctx->total_len = 0; - rctx->datbuf.size = 0; - rctx->residue.size = 0; - rctx->task = SHA_FIRST; - rctx->blk_size = crypto_ahash_blocksize(tfm); - - rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2, - &rctx->residue.addr, GFP_KERNEL); - if (!rctx->residue.buf) - goto resbuf_fail; - - rctx->residue.size = 0; - - rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN, - &rctx->datbuf.addr, GFP_KERNEL); - if (!rctx->datbuf.buf) - goto datbuf_fail; - - rctx->datbuf.size = 0; - - /* Clear any previous result */ - for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) - writel(0, se->base + se->hw->regs->result + (i * 4)); - - return 0; - -datbuf_fail: - dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf, - rctx->residue.addr); -resbuf_fail: - return -ENOMEM; -} - static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + int ret; if (aes_check_keylen(keylen)) { dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen); @@ -1711,7 +1824,24 @@ static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, if (ctx->fallback_tfm) crypto_shash_setkey(ctx->fallback_tfm, key, keylen); - return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); + ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); + if (ret) { + ctx->keylen = keylen; + memcpy(ctx->key, key, keylen); + } + + return 0; +} + +static int tegra_cmac_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); + + rctx->task = SHA_INIT; + + return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); } static int tegra_cmac_update(struct ahash_request *req) @@ -1753,8 +1883,7 @@ static int tegra_cmac_digest(struct ahash_request *req) struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); - tegra_cmac_init(req); - rctx->task |= SHA_UPDATE | SHA_FINAL; + rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL; return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); } diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c index 4d4bd727f4986..42d007b7af45d 100644 --- a/drivers/crypto/tegra/tegra-se-hash.c +++ b/drivers/crypto/tegra/tegra-se-hash.c @@ -34,6 +34,7 @@ struct tegra_sha_reqctx { struct tegra_se_datbuf datbuf; struct tegra_se_datbuf residue; struct tegra_se_datbuf digest; + struct tegra_se_datbuf intr_res; unsigned int alg; unsigned int config; unsigned int total_len; @@ -211,9 +212,62 @@ static int tegra_sha_fallback_export(struct ahash_request *req, void *out) return crypto_ahash_export(&rctx->fallback_req, out); } -static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, +static int tegra_se_insert_hash_result(struct tegra_sha_ctx *ctx, u32 *cpuvaddr, + struct tegra_sha_reqctx *rctx) +{ + __be32 *res_be = (__be32 *)rctx->intr_res.buf; + u32 *res = (u32 *)rctx->intr_res.buf; + int i = 0, j; + 
+ cpuvaddr[i++] = 0; + cpuvaddr[i++] = host1x_opcode_setpayload(HASH_RESULT_REG_COUNT); + cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_HASH_RESULT); + + for (j = 0; j < HASH_RESULT_REG_COUNT; j++) { + int idx = j; + + /* + * The initial, intermediate and final hash value of SHA-384, SHA-512 + * in SHA_HASH_RESULT registers follow the below layout of bytes. + * + * +---------------+------------+ + * | HASH_RESULT_0 | B4...B7 | + * +---------------+------------+ + * | HASH_RESULT_1 | B0...B3 | + * +---------------+------------+ + * | HASH_RESULT_2 | B12...B15 | + * +---------------+------------+ + * | HASH_RESULT_3 | B8...B11 | + * +---------------+------------+ + * | ...... | + * +---------------+------------+ + * | HASH_RESULT_14| B60...B63 | + * +---------------+------------+ + * | HASH_RESULT_15| B56...B59 | + * +---------------+------------+ + * + */ + if (ctx->alg == SE_ALG_SHA384 || ctx->alg == SE_ALG_SHA512) + idx = (j % 2) ? j - 1 : j + 1; + + /* For SHA-1, SHA-224, SHA-256, SHA-384, SHA-512 the initial + * intermediate and final hash value when stored in + * SHA_HASH_RESULT registers, the byte order is NOT in + * little-endian. + */ + if (ctx->alg <= SE_ALG_SHA512) + cpuvaddr[i++] = be32_to_cpu(res_be[idx]); + else + cpuvaddr[i++] = res[idx]; + } + + return i; +} + +static int tegra_sha_prep_cmd(struct tegra_sha_ctx *ctx, u32 *cpuvaddr, struct tegra_sha_reqctx *rctx) { + struct tegra_se *se = ctx->se; u64 msg_len, msg_left; int i = 0; @@ -241,7 +295,7 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, cpuvaddr[i++] = upper_32_bits(msg_left); cpuvaddr[i++] = 0; cpuvaddr[i++] = 0; - cpuvaddr[i++] = host1x_opcode_setpayload(6); + cpuvaddr[i++] = host1x_opcode_setpayload(2); cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_CFG); cpuvaddr[i++] = rctx->config; @@ -249,15 +303,29 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, cpuvaddr[i++] = SE_SHA_TASK_HASH_INIT; rctx->task &= ~SHA_FIRST; } else { - cpuvaddr[i++] = 0; + /* + * If it isn't the first task, program the HASH_RESULT register + * with the intermediate result from the previous task + */ + i += tegra_se_insert_hash_result(ctx, cpuvaddr + i, rctx); } + cpuvaddr[i++] = host1x_opcode_setpayload(4); + cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_IN_ADDR); cpuvaddr[i++] = rctx->datbuf.addr; cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) | SE_ADDR_HI_SZ(rctx->datbuf.size)); - cpuvaddr[i++] = rctx->digest.addr; - cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->digest.addr)) | - SE_ADDR_HI_SZ(rctx->digest.size)); + + if (rctx->task & SHA_UPDATE) { + cpuvaddr[i++] = rctx->intr_res.addr; + cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->intr_res.addr)) | + SE_ADDR_HI_SZ(rctx->intr_res.size)); + } else { + cpuvaddr[i++] = rctx->digest.addr; + cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->digest.addr)) | + SE_ADDR_HI_SZ(rctx->digest.size)); + } + if (rctx->key_id) { cpuvaddr[i++] = host1x_opcode_setpayload(1); cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_CRYPTO_CFG); @@ -266,42 +334,72 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, cpuvaddr[i++] = host1x_opcode_setpayload(1); cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_OPERATION); - cpuvaddr[i++] = SE_SHA_OP_WRSTALL | - SE_SHA_OP_START | + cpuvaddr[i++] = SE_SHA_OP_WRSTALL | SE_SHA_OP_START | SE_SHA_OP_LASTBUF; cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | 
host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); - dev_dbg(se->dev, "msg len %llu msg left %llu cfg %#x", - msg_len, msg_left, rctx->config); + dev_dbg(se->dev, "msg len %llu msg left %llu sz %zd cfg %#x", + msg_len, msg_left, rctx->datbuf.size, rctx->config); return i; } -static void tegra_sha_copy_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx) +static int tegra_sha_do_init(struct ahash_request *req) { - int i; + struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_se *se = ctx->se; - for (i = 0; i < HASH_RESULT_REG_COUNT; i++) - rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4)); -} + if (ctx->fallback) + return tegra_sha_fallback_init(req); -static void tegra_sha_paste_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx) -{ - int i; + rctx->total_len = 0; + rctx->datbuf.size = 0; + rctx->residue.size = 0; + rctx->key_id = ctx->key_id; + rctx->task |= SHA_FIRST; + rctx->alg = ctx->alg; + rctx->blk_size = crypto_ahash_blocksize(tfm); + rctx->digest.size = crypto_ahash_digestsize(tfm); - for (i = 0; i < HASH_RESULT_REG_COUNT; i++) - writel(rctx->result[i], - se->base + se->hw->regs->result + (i * 4)); + rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size, + &rctx->digest.addr, GFP_KERNEL); + if (!rctx->digest.buf) + goto digbuf_fail; + + rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size, + &rctx->residue.addr, GFP_KERNEL); + if (!rctx->residue.buf) + goto resbuf_fail; + + rctx->intr_res.size = HASH_RESULT_REG_COUNT * 4; + rctx->intr_res.buf = dma_alloc_coherent(se->dev, rctx->intr_res.size, + &rctx->intr_res.addr, GFP_KERNEL); + if (!rctx->intr_res.buf) + goto intr_res_fail; + + return 0; + +intr_res_fail: + dma_free_coherent(se->dev, rctx->residue.size, rctx->residue.buf, + rctx->residue.addr); +resbuf_fail: + dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf, + rctx->digest.addr); +digbuf_fail: + return -ENOMEM; } static int tegra_sha_do_update(struct ahash_request *req) { struct tegra_sha_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct tegra_se *se = ctx->se; unsigned int nblks, nresidue, size, ret; - u32 *cpuvaddr = ctx->se->cmdbuf->addr; + u32 *cpuvaddr = se->cmdbuf->addr; nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size; nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size; @@ -317,7 +415,6 @@ static int tegra_sha_do_update(struct ahash_request *req) rctx->src_sg = req->src; rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue; - rctx->total_len += rctx->datbuf.size; /* * If nbytes are less than a block size, copy it residue and @@ -326,11 +423,16 @@ static int tegra_sha_do_update(struct ahash_request *req) if (nblks < 1) { scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size, rctx->src_sg, 0, req->nbytes, 0); - rctx->residue.size += req->nbytes; + return 0; } + rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) + return -ENOMEM; + /* Copy the previous residue first */ if (rctx->residue.size) memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); @@ -343,29 +445,16 @@ static int tegra_sha_do_update(struct ahash_request *req) /* Update residue value with the residue after current block */ rctx->residue.size = nresidue; + rctx->total_len += rctx->datbuf.size; rctx->config = 
tegra_sha_get_config(rctx->alg) | - SE_SHA_DST_HASH_REG; + SE_SHA_DST_MEMORY; - /* - * If this is not the first 'update' call, paste the previous copied - * intermediate results to the registers so that it gets picked up. - * This is to support the import/export functionality. - */ - if (!(rctx->task & SHA_FIRST)) - tegra_sha_paste_hash_result(ctx->se, rctx); + size = tegra_sha_prep_cmd(ctx, cpuvaddr, rctx); + ret = tegra_se_host1x_submit(se, se->cmdbuf, size); - size = tegra_sha_prep_cmd(ctx->se, cpuvaddr, rctx); - - ret = tegra_se_host1x_submit(ctx->se, size); - - /* - * If this is not the final update, copy the intermediate results - * from the registers so that it can be used in the next 'update' - * call. This is to support the import/export functionality. - */ - if (!(rctx->task & SHA_FINAL)) - tegra_sha_copy_hash_result(ctx->se, rctx); + dma_free_coherent(se->dev, rctx->datbuf.size, + rctx->datbuf.buf, rctx->datbuf.addr); return ret; } @@ -379,16 +468,25 @@ static int tegra_sha_do_final(struct ahash_request *req) u32 *cpuvaddr = se->cmdbuf->addr; int size, ret = 0; - memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); + if (rctx->residue.size) { + rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) { + ret = -ENOMEM; + goto out_free; + } + + memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); + } + rctx->datbuf.size = rctx->residue.size; rctx->total_len += rctx->residue.size; rctx->config = tegra_sha_get_config(rctx->alg) | SE_SHA_DST_MEMORY; - size = tegra_sha_prep_cmd(se, cpuvaddr, rctx); - - ret = tegra_se_host1x_submit(se, size); + size = tegra_sha_prep_cmd(ctx, cpuvaddr, rctx); + ret = tegra_se_host1x_submit(se, se->cmdbuf, size); if (ret) goto out; @@ -396,12 +494,18 @@ static int tegra_sha_do_final(struct ahash_request *req) memcpy(req->result, rctx->digest.buf, rctx->digest.size); out: - dma_free_coherent(se->dev, SE_SHA_BUFLEN, - rctx->datbuf.buf, rctx->datbuf.addr); + if (rctx->residue.size) + dma_free_coherent(se->dev, rctx->datbuf.size, + rctx->datbuf.buf, rctx->datbuf.addr); +out_free: dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm), rctx->residue.buf, rctx->residue.addr); dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf, rctx->digest.addr); + + dma_free_coherent(se->dev, rctx->intr_res.size, rctx->intr_res.buf, + rctx->intr_res.addr); + return ret; } @@ -414,16 +518,31 @@ static int tegra_sha_do_one_req(struct crypto_engine *engine, void *areq) struct tegra_se *se = ctx->se; int ret = 0; + if (rctx->task & SHA_INIT) { + ret = tegra_sha_do_init(req); + if (ret) + goto out; + + rctx->task &= ~SHA_INIT; + } + if (rctx->task & SHA_UPDATE) { ret = tegra_sha_do_update(req); + if (ret) + goto out; + rctx->task &= ~SHA_UPDATE; } if (rctx->task & SHA_FINAL) { ret = tegra_sha_do_final(req); + if (ret) + goto out; + rctx->task &= ~SHA_FINAL; } +out: crypto_finalize_hash_request(se->engine, req, ret); return 0; @@ -497,52 +616,6 @@ static void tegra_sha_cra_exit(struct crypto_tfm *tfm) tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg); } -static int tegra_sha_init(struct ahash_request *req) -{ - struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); - struct tegra_se *se = ctx->se; - - if (ctx->fallback) - return tegra_sha_fallback_init(req); - - rctx->total_len = 0; - rctx->datbuf.size = 0; - rctx->residue.size = 0; - rctx->key_id = ctx->key_id; - 
rctx->task = SHA_FIRST; - rctx->alg = ctx->alg; - rctx->blk_size = crypto_ahash_blocksize(tfm); - rctx->digest.size = crypto_ahash_digestsize(tfm); - - rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size, - &rctx->digest.addr, GFP_KERNEL); - if (!rctx->digest.buf) - goto digbuf_fail; - - rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size, - &rctx->residue.addr, GFP_KERNEL); - if (!rctx->residue.buf) - goto resbuf_fail; - - rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN, - &rctx->datbuf.addr, GFP_KERNEL); - if (!rctx->datbuf.buf) - goto datbuf_fail; - - return 0; - -datbuf_fail: - dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf, - rctx->residue.addr); -resbuf_fail: - dma_free_coherent(se->dev, SE_SHA_BUFLEN, rctx->datbuf.buf, - rctx->datbuf.addr); -digbuf_fail: - return -ENOMEM; -} - static int tegra_hmac_fallback_setkey(struct tegra_sha_ctx *ctx, const u8 *key, unsigned int keylen) { @@ -559,13 +632,29 @@ static int tegra_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + int ret; if (aes_check_keylen(keylen)) return tegra_hmac_fallback_setkey(ctx, key, keylen); + ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); + if (ret) + return tegra_hmac_fallback_setkey(ctx, key, keylen); + ctx->fallback = false; - return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); + return 0; +} + +static int tegra_sha_init(struct ahash_request *req) +{ + struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + + rctx->task = SHA_INIT; + + return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); } static int tegra_sha_update(struct ahash_request *req) @@ -619,8 +708,7 @@ static int tegra_sha_digest(struct ahash_request *req) if (ctx->fallback) return tegra_sha_fallback_digest(req); - tegra_sha_init(req); - rctx->task |= SHA_UPDATE | SHA_FINAL; + rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL; return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); } diff --git a/drivers/crypto/tegra/tegra-se-key.c b/drivers/crypto/tegra/tegra-se-key.c index ac14678dbd30d..956fa9b4e9b1a 100644 --- a/drivers/crypto/tegra/tegra-se-key.c +++ b/drivers/crypto/tegra/tegra-se-key.c @@ -115,11 +115,17 @@ static int tegra_key_insert(struct tegra_se *se, const u8 *key, u32 keylen, u16 slot, u32 alg) { const u32 *keyval = (u32 *)key; - u32 *addr = se->cmdbuf->addr, size; + u32 *addr = se->keybuf->addr, size; + int ret; + + mutex_lock(&kslt_lock); size = tegra_key_prep_ins_cmd(se, addr, keyval, keylen, slot, alg); + ret = tegra_se_host1x_submit(se, se->keybuf, size); + + mutex_unlock(&kslt_lock); - return tegra_se_host1x_submit(se, size); + return ret; } void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg) @@ -135,6 +141,23 @@ void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg) tegra_keyslot_free(keyid); } +void tegra_key_invalidate_reserved(struct tegra_se *se, u32 keyid, u32 alg) +{ + u8 zkey[AES_MAX_KEY_SIZE] = {0}; + + if (!keyid) + return; + + /* Overwrite the key with 0s */ + tegra_key_insert(se, zkey, AES_MAX_KEY_SIZE, keyid, alg); +} + +inline int tegra_key_submit_reserved(struct tegra_se *se, const u8 *key, + u32 keylen, u32 alg, u32 *keyid) +{ + return tegra_key_insert(se, key, keylen, *keyid, alg); +} + int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u32 *keyid) { int ret; @@ 
-143,7 +166,7 @@ int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u3 if (!tegra_key_in_kslt(*keyid)) { *keyid = tegra_keyslot_alloc(); if (!(*keyid)) { - dev_err(se->dev, "failed to allocate key slot\n"); + dev_dbg(se->dev, "failed to allocate key slot\n"); return -ENOMEM; } } diff --git a/drivers/crypto/tegra/tegra-se-main.c b/drivers/crypto/tegra/tegra-se-main.c index f94c0331b148c..63afb0556acf1 100644 --- a/drivers/crypto/tegra/tegra-se-main.c +++ b/drivers/crypto/tegra/tegra-se-main.c @@ -141,7 +141,7 @@ static struct tegra_se_cmdbuf *tegra_se_host1x_bo_alloc(struct tegra_se *se, ssi return cmdbuf; } -int tegra_se_host1x_submit(struct tegra_se *se, u32 size) +int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size) { struct host1x_job *job; int ret; @@ -160,9 +160,9 @@ int tegra_se_host1x_submit(struct tegra_se *se, u32 size) job->engine_fallback_streamid = se->stream_id; job->engine_streamid_offset = SE_STREAM_ID; - se->cmdbuf->words = size; + cmdbuf->words = size; - host1x_job_add_gather(job, &se->cmdbuf->bo, size, 0); + host1x_job_add_gather(job, &cmdbuf->bo, size, 0); ret = host1x_job_pin(job, se->dev); if (ret) { @@ -220,14 +220,22 @@ static int tegra_se_client_init(struct host1x_client *client) goto syncpt_put; } + se->keybuf = tegra_se_host1x_bo_alloc(se, SZ_4K); + if (!se->keybuf) { + ret = -ENOMEM; + goto cmdbuf_put; + } + ret = se->hw->init_alg(se); if (ret) { dev_err(se->dev, "failed to register algorithms\n"); - goto cmdbuf_put; + goto keybuf_put; } return 0; +keybuf_put: + tegra_se_cmdbuf_put(&se->keybuf->bo); cmdbuf_put: tegra_se_cmdbuf_put(&se->cmdbuf->bo); syncpt_put: @@ -312,7 +320,6 @@ static int tegra_se_probe(struct platform_device *pdev) ret = tegra_se_host1x_register(se); if (ret) { - crypto_engine_stop(se->engine); crypto_engine_exit(se->engine); return dev_err_probe(dev, ret, "failed to init host1x params\n"); } @@ -324,7 +331,6 @@ static void tegra_se_remove(struct platform_device *pdev) { struct tegra_se *se = platform_get_drvdata(pdev); - crypto_engine_stop(se->engine); crypto_engine_exit(se->engine); host1x_client_unregister(&se->client); } diff --git a/drivers/crypto/tegra/tegra-se.h b/drivers/crypto/tegra/tegra-se.h index b9dd7ceb8783c..b6cac9384f666 100644 --- a/drivers/crypto/tegra/tegra-se.h +++ b/drivers/crypto/tegra/tegra-se.h @@ -24,6 +24,7 @@ #define SE_STREAM_ID 0x90 #define SE_SHA_CFG 0x4004 +#define SE_SHA_IN_ADDR 0x400c #define SE_SHA_KEY_ADDR 0x4094 #define SE_SHA_KEY_DATA 0x4098 #define SE_SHA_KEYMANIFEST 0x409c @@ -340,12 +341,14 @@ #define SE_CRYPTO_CTR_REG_COUNT 4 #define SE_MAX_KEYSLOT 15 #define SE_MAX_MEM_ALLOC SZ_4M -#define SE_AES_BUFLEN 0x8000 -#define SE_SHA_BUFLEN 0x2000 + +#define TEGRA_AES_RESERVED_KSLT 14 +#define TEGRA_XTS_RESERVED_KSLT 15 #define SHA_FIRST BIT(0) -#define SHA_UPDATE BIT(1) -#define SHA_FINAL BIT(2) +#define SHA_INIT BIT(1) +#define SHA_UPDATE BIT(2) +#define SHA_FINAL BIT(3) /* Security Engine operation modes */ enum se_aes_alg { @@ -420,6 +423,7 @@ struct tegra_se { struct host1x_client client; struct host1x_channel *channel; struct tegra_se_cmdbuf *cmdbuf; + struct tegra_se_cmdbuf *keybuf; struct crypto_engine *engine; struct host1x_syncpt *syncpt; struct device *dev; @@ -501,8 +505,33 @@ void tegra_deinit_aes(struct tegra_se *se); void tegra_deinit_hash(struct tegra_se *se); int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u32 *keyid); + +int tegra_key_submit_reserved(struct tegra_se *se, const u8 *key, + u32 keylen, 
u32 alg, u32 *keyid); + void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg); -int tegra_se_host1x_submit(struct tegra_se *se, u32 size); +void tegra_key_invalidate_reserved(struct tegra_se *se, u32 keyid, u32 alg); +int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size); + +static inline int tegra_key_submit_reserved_aes(struct tegra_se *se, const u8 *key, + u32 keylen, u32 alg, u32 *keyid) +{ + *keyid = TEGRA_AES_RESERVED_KSLT; + return tegra_key_submit_reserved(se, key, keylen, alg, keyid); +} + +static inline int tegra_key_submit_reserved_xts(struct tegra_se *se, const u8 *key, + u32 keylen, u32 alg, u32 *keyid) +{ + *keyid = TEGRA_XTS_RESERVED_KSLT; + return tegra_key_submit_reserved(se, key, keylen, alg, keyid); +} + +static inline bool tegra_key_is_reserved(u32 keyid) +{ + return ((keyid == TEGRA_AES_RESERVED_KSLT) || + (keyid == TEGRA_XTS_RESERVED_KSLT)); +} /* HOST1x OPCODES */ static inline u32 host1x_opcode_setpayload(unsigned int payload) diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index eb1cc0dc11a7d..0da5ddb908a3c 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -334,7 +334,7 @@ static long udmabuf_create(struct miscdevice *device, if (!ubuf) return -ENOMEM; - pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT; + pglimit = ((u64)size_limit_mb * 1024 * 1024) >> PAGE_SHIFT; for (i = 0; i < head->count; i++) { if (!IS_ALIGNED(list[i].offset, PAGE_SIZE)) goto err; diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 888aadb6a4acb..d6550b54fac16 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -860,11 +860,23 @@ void drm_framebuffer_free(struct kref *kref) int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, const struct drm_framebuffer_funcs *funcs) { + unsigned int i; int ret; + bool exists; if (WARN_ON_ONCE(fb->dev != dev || !fb->format)) return -EINVAL; + for (i = 0; i < fb->format->num_planes; i++) { + if (drm_WARN_ON_ONCE(dev, fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i))) + fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i); + if (fb->obj[i]) { + exists = drm_gem_object_handle_get_if_exists_unlocked(fb->obj[i]); + if (exists) + fb->internal_flags |= DRM_FRAMEBUFFER_HAS_HANDLE_REF(i); + } + } + INIT_LIST_HEAD(&fb->filp_head); fb->funcs = funcs; @@ -873,7 +885,7 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, ret = __drm_mode_object_add(dev, &fb->base, DRM_MODE_OBJECT_FB, false, drm_framebuffer_free); if (ret) - goto out; + goto err; mutex_lock(&dev->mode_config.fb_lock); dev->mode_config.num_fb++; @@ -881,7 +893,16 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, mutex_unlock(&dev->mode_config.fb_lock); drm_mode_object_register(dev, &fb->base); -out: + + return 0; + +err: + for (i = 0; i < fb->format->num_planes; i++) { + if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)) { + drm_gem_object_handle_put_unlocked(fb->obj[i]); + fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i); + } + } return ret; } EXPORT_SYMBOL(drm_framebuffer_init); @@ -958,6 +979,12 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private); void drm_framebuffer_cleanup(struct drm_framebuffer *fb) { struct drm_device *dev = fb->dev; + unsigned int i; + + for (i = 0; i < fb->format->num_planes; i++) { + if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)) + drm_gem_object_handle_put_unlocked(fb->obj[i]); + } 
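/* Descriptive note, not part of the patch: cleanup mirrors
 * drm_framebuffer_init() above; every plane that recorded
 * DRM_FRAMEBUFFER_HAS_HANDLE_REF drops its handle reference here, so the
 * underlying GEM object can be released once the last framebuffer using
 * it is destroyed.
 */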
mutex_lock(&dev->mode_config.fb_lock); list_del(&fb->head); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 149b8e25da5bb..ae9e671bfc95c 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -186,6 +186,46 @@ void drm_gem_private_object_fini(struct drm_gem_object *obj) } EXPORT_SYMBOL(drm_gem_private_object_fini); +static void drm_gem_object_handle_get(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + + drm_WARN_ON(dev, !mutex_is_locked(&dev->object_name_lock)); + + if (obj->handle_count++ == 0) + drm_gem_object_get(obj); +} + +/** + * drm_gem_object_handle_get_if_exists_unlocked - acquire reference on user-space handle, if any + * @obj: GEM object + * + * Acquires a reference on the GEM buffer object's handle. Required to keep + * the GEM object alive. Call drm_gem_object_handle_put_unlocked() + * to release the reference. Does nothing if the buffer object has no handle. + * + * Returns: + * True if a handle exists, or false otherwise + */ +bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + + guard(mutex)(&dev->object_name_lock); + + /* + * First ref taken during GEM object creation, if any. Some + * drivers set up internal framebuffers with GEM objects that + * do not have a GEM handle. Hence, this counter can be zero. + */ + if (!obj->handle_count) + return false; + + drm_gem_object_handle_get(obj); + + return true; +} + /** * drm_gem_object_handle_free - release resources bound to userspace handles * @obj: GEM object to clean up. @@ -216,20 +256,26 @@ static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj) } } -static void -drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj) +/** + * drm_gem_object_handle_put_unlocked - releases reference on user-space handle + * @obj: GEM object + * + * Releases a reference on the GEM buffer object's handle. Possibly releases + * the GEM buffer object and associated dma-buf objects. + */ +void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; bool final = false; - if (WARN_ON(READ_ONCE(obj->handle_count) == 0)) + if (drm_WARN_ON(dev, READ_ONCE(obj->handle_count) == 0)) return; /* - * Must bump handle count first as this may be the last - * ref, in which case the object would disappear before we - * checked for a name - */ + * Must bump handle count first as this may be the last + * ref, in which case the object would disappear before + * we checked for a name. + */ mutex_lock(&dev->object_name_lock); if (--obj->handle_count == 0) { @@ -363,8 +409,8 @@ drm_gem_handle_create_tail(struct drm_file *file_priv, int ret; WARN_ON(!mutex_is_locked(&dev->object_name_lock)); - if (obj->handle_count++ == 0) - drm_gem_object_get(obj); + + drm_gem_object_handle_get(obj); /* * Get the user-visible handle using idr.
Preload and perform diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 1705bfc90b1e7..98b73c581c426 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -153,6 +153,8 @@ void drm_sysfs_lease_event(struct drm_device *dev); /* drm_gem.c */ int drm_gem_init(struct drm_device *dev); +bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj); +void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj); int drm_gem_handle_create_tail(struct drm_file *file_priv, struct drm_gem_object *obj, u32 *handlep); diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index 0c1a713b7b7b3..be642ee739c4f 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -245,17 +245,19 @@ static int __init vkms_init(void) if (!config) return -ENOMEM; - default_config = config; - config->cursor = enable_cursor; config->writeback = enable_writeback; config->overlay = enable_overlay; ret = vkms_create(config); - if (ret) + if (ret) { kfree(config); + return ret; + } - return ret; + default_config = config; + + return 0; } static void vkms_destroy(struct vkms_config *config) @@ -279,9 +281,10 @@ static void vkms_destroy(struct vkms_config *config) static void __exit vkms_exit(void) { - if (default_config->dev) - vkms_destroy(default_config); + if (!default_config) + return; + vkms_destroy(default_config); kfree(default_config); } diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index d8bf359fdd69a..04192190bebab 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -667,8 +667,7 @@ static int ibmveth_close(struct net_device *netdev) napi_disable(&adapter->napi); - if (!adapter->pool_config) - netif_tx_stop_all_queues(netdev); + netif_tx_stop_all_queues(netdev); h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); @@ -776,9 +775,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data) if (netif_running(dev)) { restart = 1; - adapter->pool_config = 1; ibmveth_close(dev); - adapter->pool_config = 0; } set_attr = 0; @@ -860,9 +857,7 @@ static int ibmveth_set_tso(struct net_device *dev, u32 data) if (netif_running(dev)) { restart = 1; - adapter->pool_config = 1; ibmveth_close(dev); - adapter->pool_config = 0; } set_attr = 0; @@ -1512,9 +1507,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) only the buffer pools necessary to hold the new MTU */ if (netif_running(adapter->netdev)) { need_restart = 1; - adapter->pool_config = 1; ibmveth_close(adapter->netdev); - adapter->pool_config = 0; } /* Look for an active buffer pool that can hold the new MTU */ @@ -1678,7 +1671,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) adapter->vdev = dev; adapter->netdev = netdev; adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p); - adapter->pool_config = 0; ibmveth_init_link_settings(netdev); netif_napi_add_weight(netdev, &adapter->napi, ibmveth_poll, 16); @@ -1810,20 +1802,22 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr, long value = simple_strtol(buf, NULL, 10); long rc; + rtnl_lock(); + if (attr == &veth_active_attr) { if (value && !pool->active) { if (netif_running(netdev)) { if (ibmveth_alloc_buffer_pool(pool)) { netdev_err(netdev, "unable to alloc pool\n"); - return -ENOMEM; + rc = -ENOMEM; + goto unlock_err; } pool->active = 1; - adapter->pool_config = 1; ibmveth_close(netdev); - adapter->pool_config = 
0; - if ((rc = ibmveth_open(netdev))) - return rc; + rc = ibmveth_open(netdev); + if (rc) + goto unlock_err; } else { pool->active = 1; } @@ -1843,54 +1837,59 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr, if (i == IBMVETH_NUM_BUFF_POOLS) { netdev_err(netdev, "no active pool >= MTU\n"); - return -EPERM; + rc = -EPERM; + goto unlock_err; } if (netif_running(netdev)) { - adapter->pool_config = 1; ibmveth_close(netdev); pool->active = 0; - adapter->pool_config = 0; - if ((rc = ibmveth_open(netdev))) - return rc; + rc = ibmveth_open(netdev); + if (rc) + goto unlock_err; } pool->active = 0; } } else if (attr == &veth_num_attr) { if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) { - return -EINVAL; + rc = -EINVAL; + goto unlock_err; } else { if (netif_running(netdev)) { - adapter->pool_config = 1; ibmveth_close(netdev); - adapter->pool_config = 0; pool->size = value; - if ((rc = ibmveth_open(netdev))) - return rc; + rc = ibmveth_open(netdev); + if (rc) + goto unlock_err; } else { pool->size = value; } } } else if (attr == &veth_size_attr) { if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) { - return -EINVAL; + rc = -EINVAL; + goto unlock_err; } else { if (netif_running(netdev)) { - adapter->pool_config = 1; ibmveth_close(netdev); - adapter->pool_config = 0; pool->buff_size = value; - if ((rc = ibmveth_open(netdev))) - return rc; + rc = ibmveth_open(netdev); + if (rc) + goto unlock_err; } else { pool->buff_size = value; } } } + rtnl_unlock(); /* kick the interrupt handler to allocate/deallocate pools */ ibmveth_interrupt(netdev->irq, netdev); return count; + +unlock_err: + rtnl_unlock(); + return rc; } diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h index 115d4c45aa77d..8468e2c59d7a4 100644 --- a/drivers/net/ethernet/ibm/ibmveth.h +++ b/drivers/net/ethernet/ibm/ibmveth.h @@ -147,7 +147,6 @@ struct ibmveth_adapter { dma_addr_t filter_list_dma; struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS]; struct ibmveth_rx_q rx_queue; - int pool_config; int rx_csum; int large_send; bool is_active_trunk; diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c index 4849590a5591f..f29f94a1d7f2b 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_controlq.c +++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c @@ -96,7 +96,7 @@ static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq) */ static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq) { - mutex_lock(&cq->cq_lock); + spin_lock(&cq->cq_lock); /* free ring buffers and the ring itself */ idpf_ctlq_dealloc_ring_res(hw, cq); @@ -104,8 +104,7 @@ static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq) /* Set ring_size to 0 to indicate uninitialized queue */ cq->ring_size = 0; - mutex_unlock(&cq->cq_lock); - mutex_destroy(&cq->cq_lock); + spin_unlock(&cq->cq_lock); } /** @@ -173,7 +172,7 @@ int idpf_ctlq_add(struct idpf_hw *hw, idpf_ctlq_init_regs(hw, cq, is_rxq); - mutex_init(&cq->cq_lock); + spin_lock_init(&cq->cq_lock); list_add(&cq->cq_list, &hw->cq_list_head); @@ -272,7 +271,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq, int err = 0; int i; - mutex_lock(&cq->cq_lock); + spin_lock(&cq->cq_lock); /* Ensure there are enough descriptors to send all messages */ num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq); @@ -332,7 +331,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq, wr32(hw, cq->reg.tail, cq->next_to_use); err_unlock: - 
mutex_unlock(&cq->cq_lock); + spin_unlock(&cq->cq_lock); return err; } @@ -364,7 +363,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count, if (*clean_count > cq->ring_size) return -EBADR; - mutex_lock(&cq->cq_lock); + spin_lock(&cq->cq_lock); ntc = cq->next_to_clean; @@ -394,7 +393,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count, cq->next_to_clean = ntc; - mutex_unlock(&cq->cq_lock); + spin_unlock(&cq->cq_lock); /* Return number of descriptors actually cleaned */ *clean_count = i; @@ -432,7 +431,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, if (*buff_count > 0) buffs_avail = true; - mutex_lock(&cq->cq_lock); + spin_lock(&cq->cq_lock); if (tbp >= cq->ring_size) tbp = 0; @@ -521,7 +520,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, wr32(hw, cq->reg.tail, cq->next_to_post); } - mutex_unlock(&cq->cq_lock); + spin_unlock(&cq->cq_lock); /* return the number of buffers that were not posted */ *buff_count = *buff_count - i; @@ -549,7 +548,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg, u16 i; /* take the lock before we start messing with the ring */ - mutex_lock(&cq->cq_lock); + spin_lock(&cq->cq_lock); ntc = cq->next_to_clean; @@ -608,7 +607,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg, cq->next_to_clean = ntc; - mutex_unlock(&cq->cq_lock); + spin_unlock(&cq->cq_lock); *num_q_msg = i; if (*num_q_msg == 0) diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h index e8e046ef2f0d7..5890d8adca4a8 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h +++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h @@ -99,7 +99,7 @@ struct idpf_ctlq_info { enum idpf_ctlq_type cq_type; int q_id; - struct mutex cq_lock; /* control queue lock */ + spinlock_t cq_lock; /* control queue lock */ /* used for interrupt processing */ u16 next_to_use; u16 next_to_clean; diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index a3d6b8f198a86..5393d05402fc9 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -2321,8 +2321,12 @@ void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size) struct idpf_adapter *adapter = hw->back; size_t sz = ALIGN(size, 4096); - mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz, - &mem->pa, GFP_KERNEL); + /* The control queue resources are freed under a spinlock; contiguous + * pages will avoid IOMMU remapping and the use of vmap() (and vunmap() + * in the dma_free_*() path).
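+ * Illustrative context beyond the patch text: vunmap() may sleep, so the
+ * free must not happen under the spinlock that now guards the control
+ * queue; forcing contiguous pages keeps dma_free_attrs() off that path.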
+ */ + mem->va = dma_alloc_attrs(&adapter->pdev->dev, sz, &mem->pa, + GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS); mem->size = sz; return mem->va; @@ -2337,8 +2341,8 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem) { struct idpf_adapter *adapter = hw->back; - dma_free_coherent(&adapter->pdev->dev, mem->size, - mem->va, mem->pa); + dma_free_attrs(&adapter->pdev->dev, mem->size, + mem->va, mem->pa, DMA_ATTR_FORCE_CONTIGUOUS); mem->size = 0; mem->va = NULL; mem->pa = 0; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 6168fc34ff363..d08829b658f5f 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -6006,9 +6006,9 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba) phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr); phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr); - memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion)); - strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str, + memcpy(phba->BIOSVersion, cntl_attr->bios_ver_str, sizeof(phba->BIOSVersion)); + phba->BIOSVersion[sizeof(phba->BIOSVersion) - 1] = '\0'; lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, " diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c index 254347d16db68..21a887f3a36fa 100644 --- a/fs/smb/client/file.c +++ b/fs/smb/client/file.c @@ -1066,7 +1066,7 @@ int cifs_close(struct inode *inode, struct file *file) if ((cfile->status_file_deleted == false) && (smb2_can_defer_close(inode, dclose))) { if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) { - inode->i_ctime = inode->i_mtime = current_time(inode); + inode->i_mtime = inode_set_ctime_current(inode); } spin_lock(&cinode->deferred_lock); cifs_add_deferred_close(cfile, dclose); @@ -2634,7 +2634,7 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) write_data, to - from, &offset); cifsFileInfo_put(open_file); /* Does mm or vfs already set times? 
*/ - inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); + inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode); if ((bytes_written > 0) && (offset)) rc = 0; else if (bytes_written < 0) diff --git a/fs/smb/client/fscache.h b/fs/smb/client/fscache.h index c691b98b442a6..82e7f7a154914 100644 --- a/fs/smb/client/fscache.h +++ b/fs/smb/client/fscache.h @@ -50,12 +50,13 @@ void cifs_fscache_fill_coherency(struct inode *inode, struct cifs_fscache_inode_coherency_data *cd) { struct cifsInodeInfo *cifsi = CIFS_I(inode); + struct timespec64 ctime = inode_get_ctime(inode); memset(cd, 0, sizeof(*cd)); cd->last_write_time_sec = cpu_to_le64(cifsi->netfs.inode.i_mtime.tv_sec); cd->last_write_time_nsec = cpu_to_le32(cifsi->netfs.inode.i_mtime.tv_nsec); - cd->last_change_time_sec = cpu_to_le64(cifsi->netfs.inode.i_ctime.tv_sec); - cd->last_change_time_nsec = cpu_to_le32(cifsi->netfs.inode.i_ctime.tv_nsec); + cd->last_change_time_sec = cpu_to_le64(ctime.tv_sec); + cd->last_change_time_nsec = cpu_to_le32(ctime.tv_nsec); } diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c index 83d1b0b9f5c11..72ba96c160799 100644 --- a/fs/smb/client/inode.c +++ b/fs/smb/client/inode.c @@ -170,7 +170,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr, else inode->i_atime = fattr->cf_atime; inode->i_mtime = fattr->cf_mtime; - inode->i_ctime = fattr->cf_ctime; + inode_set_ctime_to_ts(inode, fattr->cf_ctime); inode->i_rdev = fattr->cf_rdev; cifs_nlink_fattr_to_inode(inode, fattr); inode->i_uid = fattr->cf_uid; @@ -1902,15 +1902,24 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry) struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct tcon_link *tlink; struct cifs_tcon *tcon; + __u32 dosattr = 0, origattr = 0; struct TCP_Server_Info *server; struct iattr *attrs = NULL; - __u32 dosattr = 0, origattr = 0; + bool rehash = false; cifs_dbg(FYI, "cifs_unlink, dir=0x%p, dentry=0x%p\n", dir, dentry); if (unlikely(cifs_forced_shutdown(cifs_sb))) return -EIO; + /* Unhash dentry in advance to prevent any concurrent opens */ + spin_lock(&dentry->d_lock); + if (!d_unhashed(dentry)) { + __d_drop(dentry); + rehash = true; + } + spin_unlock(&dentry->d_lock); + tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); @@ -1961,7 +1970,8 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry) cifs_drop_nlink(inode); } } else if (rc == -ENOENT) { - d_drop(dentry); + if (simple_positive(dentry)) + d_delete(dentry); } else if (rc == -EBUSY) { if (server->ops->rename_pending_delete) { rc = server->ops->rename_pending_delete(full_path, @@ -2004,9 +2014,9 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry) cifs_inode = CIFS_I(inode); cifs_inode->time = 0; /* will force revalidate to get info when needed */ - inode->i_ctime = current_time(inode); + inode_set_ctime_current(inode); } - dir->i_ctime = dir->i_mtime = current_time(dir); + dir->i_mtime = inode_set_ctime_current(dir); cifs_inode = CIFS_I(dir); CIFS_I(dir)->time = 0; /* force revalidate of dir as well */ unlink_out: @@ -2014,6 +2024,8 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry) kfree(attrs); free_xid(xid); cifs_put_tlink(tlink); + if (rehash) + d_rehash(dentry); return rc; } @@ -2322,8 +2334,8 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry) */ cifsInode->time = 0; - d_inode(direntry)->i_ctime = inode->i_ctime = inode->i_mtime = - current_time(inode); + inode_set_ctime_current(d_inode(direntry)); + inode->i_mtime = inode_set_ctime_current(inode); rmdir_exit: 
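/* Descriptive note, not part of the patch: cifs_unlink() above and
 * cifs_rename2() below share a pattern - d_drop() the dentry first so a
 * concurrent open cannot race with the server-side operation, then
 * d_rehash() it on the exit path (for rename, only when the operation
 * failed) so lookups see it again.
 */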
free_dentry_path(page); @@ -2420,6 +2432,7 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir, struct cifs_sb_info *cifs_sb; struct tcon_link *tlink; struct cifs_tcon *tcon; + bool rehash = false; unsigned int xid; int rc, tmprc; int retry_count = 0; @@ -2435,6 +2448,17 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir, if (unlikely(cifs_forced_shutdown(cifs_sb))) return -EIO; + /* + * Prevent any concurrent opens on the target by unhashing the dentry. + * VFS already unhashes the target when renaming directories. + */ + if (d_is_positive(target_dentry) && !d_is_dir(target_dentry)) { + if (!d_unhashed(target_dentry)) { + d_drop(target_dentry); + rehash = true; + } + } + tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); @@ -2474,6 +2498,8 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir, } } + if (!rc) + rehash = false; /* * No-replace is the natural behavior for CIFS, so skip unlink hacks. */ @@ -2532,15 +2558,19 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir, goto cifs_rename_exit; rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry, to_name); + if (!rc) + rehash = false; } /* force revalidate to go get info when needed */ CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0; - source_dir->i_ctime = source_dir->i_mtime = target_dir->i_ctime = - target_dir->i_mtime = current_time(source_dir); + source_dir->i_mtime = target_dir->i_mtime = inode_set_ctime_to_ts(source_dir, + inode_set_ctime_current(target_dir)); cifs_rename_exit: + if (rehash) + d_rehash(target_dentry); kfree(info_buf_source); free_dentry_path(page2); free_dentry_path(page1); diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c index ecd0b5c79c22d..07283c98c5759 100644 --- a/fs/smb/client/smb2ops.c +++ b/fs/smb/client/smb2ops.c @@ -1443,7 +1443,8 @@ smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon, if (file_inf.LastWriteTime) inode->i_mtime = cifs_NTtimeToUnix(file_inf.LastWriteTime); if (file_inf.ChangeTime) - inode->i_ctime = cifs_NTtimeToUnix(file_inf.ChangeTime); + inode_set_ctime_to_ts(inode, + cifs_NTtimeToUnix(file_inf.ChangeTime)); if (file_inf.LastAccessTime) inode->i_atime = cifs_NTtimeToUnix(file_inf.LastAccessTime); diff --git a/include/crypto/engine.h b/include/crypto/engine.h index 7db1bae33021c..cf57e732566bb 100644 --- a/include/crypto/engine.h +++ b/include/crypto/engine.h @@ -19,15 +19,9 @@ struct device; /* * struct crypto_engine_op - crypto hardware engine operations - * @prepare__request: do some prepare if need before handle the current request - * @unprepare_request: undo any work done by prepare_request() * @do_one_request: do encryption for current request */ struct crypto_engine_op { - int (*prepare_request)(struct crypto_engine *engine, - void *areq); - int (*unprepare_request)(struct crypto_engine *engine, - void *areq); int (*do_one_request)(struct crypto_engine *engine, void *areq); }; diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h index 668077009fced..38b24fc8978d3 100644 --- a/include/drm/drm_framebuffer.h +++ b/include/drm/drm_framebuffer.h @@ -23,6 +23,7 @@ #ifndef __DRM_FRAMEBUFFER_H__ #define __DRM_FRAMEBUFFER_H__ +#include <linux/bits.h> #include <linux/ctype.h> #include <linux/list.h> #include <linux/sched.h> @@ -100,6 +101,8 @@ struct drm_framebuffer_funcs { unsigned num_clips); }; +#define DRM_FRAMEBUFFER_HAS_HANDLE_REF(_i) BIT(0u + (_i)) + /** * struct drm_framebuffer - frame buffer object * @@ -188,6 +191,10 @@ struct drm_framebuffer { * DRM_MODE_FB_MODIFIERS.
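* (Note, not part of the patch: these are the userspace-supplied flags from
* the ADDFB2 ioctl; driver-internal state such as
* DRM_FRAMEBUFFER_HAS_HANDLE_REF is kept in @internal_flags, added below.)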
*/ int flags; + /** + * @internal_flags: Framebuffer flags like DRM_FRAMEBUFFER_HAS_HANDLE_REF. + */ + unsigned int internal_flags; /** * @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock. */ diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index cb925e8ef9a8b..677311326429f 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -1379,6 +1379,15 @@ void run_posix_cpu_timers(void) lockdep_assert_irqs_disabled(); + /* + * Ensure that release_task(tsk) can't happen while + * handle_posix_cpu_timers() is running. Otherwise, a concurrent + * posix_cpu_timer_del() may fail to lock_task_sighand(tsk) and + * miss timer->it.cpu.firing != 0. + */ + if (tsk->exit_state) + return; + /* * If the actual expiry is deferred to task work context and the * work is already scheduled there is no point to do anything here. diff --git a/redhat/kernel.changelog-9.6 b/redhat/kernel.changelog-9.6 index ab6a19d5fe980..5855f0455d960 100644 --- a/redhat/kernel.changelog-9.6 +++ b/redhat/kernel.changelog-9.6 @@ -1,3 +1,46 @@ +* Mon Sep 08 2025 Patrick Talbert [5.14.0-570.42.2.el9_6] +- posix-cpu-timers: fix race between handle_posix_cpu_timers() and posix_cpu_timer_del() (CKI Backport Bot) [RHEL-112780] {CVE-2025-38352} +- powerpc/pseries/iommu: create DDW for devices with DMA mask less than 64-bits (CKI Backport Bot) [RHEL-113173] +Resolves: RHEL-112780, RHEL-113173 + +* Sat Aug 30 2025 CKI KWF Bot [5.14.0-570.42.1.el9_6] +- powerpc/pseries/iommu: memory notifier incorrectly adds TCEs for pmemory (Mamatha Inamdar) [RHEL-103015] +- drm/framebuffer: Acquire internal references on GEM handles (José Expósito) [RHEL-106699] {CVE-2025-38449} +- drm/gem: Acquire references on GEM handles for framebuffers (José Expósito) [RHEL-106699] {CVE-2025-38449} +- drm/vkms: Fix use after free and double free on init error (CKI KWF BOT) [RHEL-99420] {CVE-2025-22097} +- scsi: lpfc: Use memcpy() for BIOS version (Ewan D. 
Milne) [RHEL-105933] {CVE-2025-38332} +Resolves: RHEL-103015, RHEL-105933, RHEL-106699, RHEL-99420 + +* Thu Aug 28 2025 CKI KWF Bot [5.14.0-570.41.1.el9_6] +- powerpc/pseries/iommu: Fix kmemleak in TCE table userspace view (Mamatha Inamdar) [RHEL-107002] +- net: ibmveth: make veth_pool_store stop hanging (Mamatha Inamdar) [RHEL-109494] +- ibmveth: Always stop tx queues during close (Mamatha Inamdar) [RHEL-109494] +- smb: client: fix race with concurrent opens in rename(2) (Paulo Alcantara) [RHEL-109723] +- smb: client: fix race with concurrent opens in unlink(2) (Paulo Alcantara) [RHEL-109723] +- smb: convert to ctime accessor functions (Paulo Alcantara) [RHEL-109723] +- crypto: tegra - Fix IV usage for AES ECB (Nirmala Dalvi) [RHEL-107286] +- crypto: tegra - Fix format specifier in tegra_sha_prep_cmd() (Nirmala Dalvi) [RHEL-107286] +- crypto: tegra - Use HMAC fallback when keyslots are full (Nirmala Dalvi) [RHEL-107286] +- crypto: tegra - Reserve keyslots to allocate dynamically (Nirmala Dalvi) [RHEL-107286] +- crypto: tegra - Set IV to NULL explicitly for AES ECB (Nirmala Dalvi) [RHEL-107286] +- crypto: tegra - Fix CMAC intermediate result handling (Nirmala Dalvi) [RHEL-107286] +- crypto: tegra - Fix HASH intermediate result handling (Nirmala Dalvi) [RHEL-107286] +- crypto: tegra - Transfer HASH init function to crypto engine (Nirmala Dalvi) [RHEL-107286] +- crypto: tegra - check return value for hash do_one_req (Nirmala Dalvi) [RHEL-107286] +- crypto: tegra - finalize crypto req on error (Nirmala Dalvi) [RHEL-107286] +- crypto: tegra - Do not use fixed size buffers (Nirmala Dalvi) [RHEL-107286] +- crypto: tegra - Use separate buffer for setkey (Nirmala Dalvi) [RHEL-107286] +- crypto: tegra - remove unneeded crypto_engine_stop() call (Nirmala Dalvi) [RHEL-107286] +- crypto: tegra - remove redundant error check on ret (Nirmala Dalvi) [RHEL-107286] +- crypto: tegra - do not transfer req when tegra init fails (Nirmala Dalvi) [RHEL-107286] +- crypto: engine - Remove prepare/unprepare request (Nirmala Dalvi) [RHEL-107286] +- udmabuf: fix a buf size overflow issue during udmabuf creation (CKI Backport Bot) [RHEL-99746] {CVE-2025-37803} +Resolves: RHEL-107002, RHEL-107286, RHEL-109494, RHEL-109723, RHEL-99746 + +* Wed Aug 27 2025 CKI KWF Bot [5.14.0-570.40.1.el9_6] +- idpf: convert control queue mutex to a spinlock (CKI Backport Bot) [RHEL-106054] {CVE-2025-38392} +Resolves: RHEL-106054 + * Sat Aug 23 2025 CKI KWF Bot [5.14.0-570.39.1.el9_6] - xfrm: interface: fix use-after-free after changing collect_md xfrm interface (CKI Backport Bot) [RHEL-109529] {CVE-2025-38500} - Merge: net: mana: Fix race of mana_hwc_post_rx_wqe and new hwc response [rhel-9.6.z] (Maxim Levitsky) [RHEL-58904]