Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto fixes from Herbert Xu:
 "This fixes the following issues:

   - a regression caused by the conversion of IPsec ESP to the new AEAD
     interface: ESN with authencesn no longer works because it relied on
     the AD input SG list having a specific layout which is no longer
     the case.  In linux-next authencesn is fixed properly and no longer
     assumes anything about the SG list format.  For this release, a
     minimal fix is applied to authencesn so that it works with the
     new linear layout.

   - fix memory corruption caused by a bogus index in the caam hash code.

   - fix powerpc nx SHA hashing which could cause module load failures
     if module signature verification is enabled"

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: caam - fix memory corruption in ahash_final_ctx
  crypto: nx - respect sg limit bounds when building sg lists for SHA
  crypto: authencesn - Fix breakage with new ESP code
torvalds committed Aug 17, 2015
2 parents 2c6625c + b310c17 commit a36304b
Showing 4 changed files with 48 additions and 58 deletions.
44 changes: 12 additions & 32 deletions crypto/authencesn.c
@@ -393,8 +393,6 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
 	struct scatterlist *cipher = areq_ctx->cipher;
 	struct scatterlist *hsg = areq_ctx->hsg;
 	struct scatterlist *tsg = areq_ctx->tsg;
-	struct scatterlist *assoc1;
-	struct scatterlist *assoc2;
 	unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
 	unsigned int cryptlen = req->cryptlen;
 	struct page *dstp;
@@ -412,27 +410,19 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
 		cryptlen += ivsize;
 	}
 
-	if (sg_is_last(assoc))
-		return -EINVAL;
-
-	assoc1 = assoc + 1;
-	if (sg_is_last(assoc1))
-		return -EINVAL;
-
-	assoc2 = assoc + 2;
-	if (!sg_is_last(assoc2))
+	if (assoc->length < 12)
 		return -EINVAL;
 
 	sg_init_table(hsg, 2);
-	sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
-	sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+	sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
+	sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
 
 	sg_init_table(tsg, 1);
-	sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+	sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
 
 	areq_ctx->cryptlen = cryptlen;
-	areq_ctx->headlen = assoc->length + assoc2->length;
-	areq_ctx->trailen = assoc1->length;
+	areq_ctx->headlen = 8;
+	areq_ctx->trailen = 4;
 	areq_ctx->sg = dst;
 
 	areq_ctx->complete = authenc_esn_geniv_ahash_done;
@@ -563,8 +553,6 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
 	struct scatterlist *cipher = areq_ctx->cipher;
 	struct scatterlist *hsg = areq_ctx->hsg;
 	struct scatterlist *tsg = areq_ctx->tsg;
-	struct scatterlist *assoc1;
-	struct scatterlist *assoc2;
 	unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
 	struct page *srcp;
 	u8 *vsrc;
@@ -580,27 +568,19 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
 		cryptlen += ivsize;
 	}
 
-	if (sg_is_last(assoc))
-		return -EINVAL;
-
-	assoc1 = assoc + 1;
-	if (sg_is_last(assoc1))
-		return -EINVAL;
-
-	assoc2 = assoc + 2;
-	if (!sg_is_last(assoc2))
+	if (assoc->length < 12)
 		return -EINVAL;
 
 	sg_init_table(hsg, 2);
-	sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
-	sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+	sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
+	sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
 
 	sg_init_table(tsg, 1);
-	sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+	sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
 
 	areq_ctx->cryptlen = cryptlen;
-	areq_ctx->headlen = assoc->length + assoc2->length;
-	areq_ctx->trailen = assoc1->length;
+	areq_ctx->headlen = 8;
+	areq_ctx->trailen = 4;
 	areq_ctx->sg = src;
 
 	areq_ctx->complete = authenc_esn_verify_ahash_done;
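With the new ESP code the associated data reaches authencesn as a single linear SG entry of at least 12 bytes, rather than the three separate entries the old code walked with sg_is_last(). The minimal fix above slices that one entry by offset: bytes 0-3 and 8-11 form the 8-byte header hashed before the payload, and bytes 4-7 form the 4-byte trailer hashed after it. A rough sketch of the layout this implies follows; the field order is inferred from the offsets used in the patch, not stated by it.

/*
 * Sketch only: presumed layout of the 12-byte linear AD blob that the
 * new ESP code hands to authencesn, inferred from the sg_set_page()
 * offsets above (0 and 8 -> header, 4 -> trailer).
 */
struct esn_assoc_sketch {
	__be32 spi;	/* offset 0: first half of the hashed header   */
	__be32 seq_hi;	/* offset 4: hashed as the 4-byte ESN trailer  */
	__be32 seq_lo;	/* offset 8: second half of the hashed header  */
};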
7 changes: 4 additions & 3 deletions drivers/crypto/caam/caamhash.c
@@ -909,13 +909,14 @@ static int ahash_final_ctx(struct ahash_request *req)
 			  state->buflen_1;
 	u32 *sh_desc = ctx->sh_desc_fin, *desc;
 	dma_addr_t ptr = ctx->sh_desc_fin_dma;
-	int sec4_sg_bytes;
+	int sec4_sg_bytes, sec4_sg_src_index;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct ahash_edesc *edesc;
 	int ret = 0;
 	int sh_len;
 
-	sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
+	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
 	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
@@ -942,7 +943,7 @@ static int ahash_final_ctx(struct ahash_request *req)
 	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
 						buf, state->buf_dma, buflen,
 						last_buflen);
-	(edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
+	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
 
 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
 					    sec4_sg_bytes, DMA_TO_DEVICE);
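The caam bug was an index/byte-count mix-up: sec4_sg_bytes is the size of the link table in bytes, but the old code used it as an entry index when OR-ing in SEC4_SG_LEN_FIN, so the flag was written well past the end of the small table, corrupting memory outside it. Keeping the entry count in sec4_sg_src_index and indexing with that puts the flag in the last real entry. A minimal sketch of the arithmetic, with an assumed entry size purely for illustration:

/* Illustration only; the entry size is an assumption, not taken from
 * the CAAM headers.
 */
enum { ASSUMED_SEC4_SG_ENTRY_SIZE = 16 };

static int fin_entry_index_old(int buflen)
{
	int sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * ASSUMED_SEC4_SG_ENTRY_SIZE;

	return sec4_sg_bytes - 1;	/* e.g. 31: far beyond a 2-entry table */
}

static int fin_entry_index_new(int buflen)
{
	int sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	return sec4_sg_src_index - 1;	/* e.g. 1: the last real entry */
}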
27 changes: 16 additions & 11 deletions drivers/crypto/nx/nx-sha256.c
@@ -71,7 +71,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 	struct sha256_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	struct nx_sg *in_sg;
 	struct nx_sg *out_sg;
 	u64 to_process = 0, leftover, total;
 	unsigned long irq_flags;
@@ -97,7 +96,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
 	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
-	in_sg = nx_ctx->in_sg;
 	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
 			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
 	max_sg_len = min_t(u64, max_sg_len,
@@ -114,17 +112,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 	}
 
 	do {
-		/*
-		 * to_process: the SHA256_BLOCK_SIZE data chunk to process in
-		 * this update. This value is also restricted by the sg list
-		 * limits.
-		 */
-		to_process = total - to_process;
-		to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+		int used_sgs = 0;
+		struct nx_sg *in_sg = nx_ctx->in_sg;
 
 		if (buf_len) {
 			data_len = buf_len;
-			in_sg = nx_build_sg_list(nx_ctx->in_sg,
+			in_sg = nx_build_sg_list(in_sg,
 						 (u8 *) sctx->buf,
 						 &data_len,
 						 max_sg_len);
@@ -133,15 +126,27 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 				rc = -EINVAL;
 				goto out;
 			}
+			used_sgs = in_sg - nx_ctx->in_sg;
 		}
 
+		/* to_process: SHA256_BLOCK_SIZE aligned chunk to be
+		 * processed in this iteration. This value is restricted
+		 * by sg list limits and number of sgs we already used
+		 * for leftover data. (see above)
+		 * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
+		 * but because data may not be aligned, we need to account
+		 * for that too. */
+		to_process = min_t(u64, total,
+			(max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+		to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+
 		data_len = to_process - buf_len;
 		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
 					 &data_len, max_sg_len);
 
 		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
 
-		to_process = (data_len + buf_len);
+		to_process = data_len + buf_len;
 		leftover = total - to_process;
 
 		/*
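The NX fix bounds each iteration by what the SG list can actually describe. Before, to_process was derived only from the total length and block alignment, so nx_build_sg_list() could be asked to map more data than max_sg_len entries allow; the operation then ran with a truncated SG list and produced a wrong digest, which surfaced as module signature verification failures. The new bound subtracts the entries already spent on buffered leftover data, plus one spare for an unaligned start. A standalone sketch of that bound, using stand-in constants rather than the driver's real headers:

#include <stdint.h>

/* Stand-ins for the driver's constants; both values are assumptions
 * made for this sketch.
 */
#define ASSUMED_NX_PAGE_SIZE	4096ULL
#define SHA256_BLOCK_SIZE	64ULL

/* Largest byte count one iteration may hand to nx_build_sg_list() when
 * used_sgs entries already describe buffered leftover data.
 */
static uint64_t bound_to_process(uint64_t total, uint64_t max_sg_len,
				 uint64_t used_sgs)
{
	uint64_t room = (max_sg_len - 1 - used_sgs) * ASSUMED_NX_PAGE_SIZE;
	uint64_t to_process = total < room ? total : room;

	return to_process & ~(SHA256_BLOCK_SIZE - 1);	/* whole blocks only */
}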
28 changes: 16 additions & 12 deletions drivers/crypto/nx/nx-sha512.c
@@ -71,7 +71,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 	struct sha512_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	struct nx_sg *in_sg;
 	struct nx_sg *out_sg;
 	u64 to_process, leftover = 0, total;
 	unsigned long irq_flags;
@@ -97,7 +96,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
 	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
-	in_sg = nx_ctx->in_sg;
 	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
 			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
 	max_sg_len = min_t(u64, max_sg_len,
@@ -114,27 +112,33 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 	}
 
 	do {
-		/*
-		 * to_process: the SHA512_BLOCK_SIZE data chunk to process in
-		 * this update. This value is also restricted by the sg list
-		 * limits.
-		 */
-		to_process = total - leftover;
-		to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
-		leftover = total - to_process;
+		int used_sgs = 0;
+		struct nx_sg *in_sg = nx_ctx->in_sg;
 
 		if (buf_len) {
 			data_len = buf_len;
-			in_sg = nx_build_sg_list(nx_ctx->in_sg,
+			in_sg = nx_build_sg_list(in_sg,
 						 (u8 *) sctx->buf,
 						 &data_len, max_sg_len);
 
 			if (data_len != buf_len) {
 				rc = -EINVAL;
 				goto out;
 			}
+			used_sgs = in_sg - nx_ctx->in_sg;
 		}
 
+		/* to_process: SHA512_BLOCK_SIZE aligned chunk to be
+		 * processed in this iteration. This value is restricted
+		 * by sg list limits and number of sgs we already used
+		 * for leftover data. (see above)
+		 * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
+		 * but because data may not be aligned, we need to account
+		 * for that too. */
+		to_process = min_t(u64, total,
+			(max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+		to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
+
 		data_len = to_process - buf_len;
 		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
 					 &data_len, max_sg_len);
@@ -146,7 +150,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 			goto out;
 		}
 
-		to_process = (data_len + buf_len);
+		to_process = data_len + buf_len;
 		leftover = total - to_process;
 
 		/*
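The nx-sha512 change is the same bound with SHA512_BLOCK_SIZE (128 bytes) in place of SHA256_BLOCK_SIZE; the sketch after the nx-sha256 diff applies here unchanged.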
