diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 5f3c22d..d4a88bb 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -939,6 +939,7 @@ static int rfc4106_set_authsize(struct crypto_aead *parent,
 static int helper_rfc4106_encrypt(struct aead_request *req)
 {
 	u8 one_entry_in_sg = 0;
+	u8 one_entry_in_dst = 0;
 	u8 *src, *dst, *assoc, *tag;
 	__be32 counter = cpu_to_be32(1);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -958,6 +959,26 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
 		*(iv+4+i) = req->iv[i];
 	*((__be32 *)(iv+12)) = counter;
 
+	if (req->src->length >= padded_assoclen) {
+		struct scatterlist tmp[2];
+		struct scatterlist *sg;
+		//printk("Assoc is in one buf %li\n", req->src->length);
+		assoc = page_address(sg_page(req->src)) + req->src->offset;
+		sg = scatterwalk_ffwd(tmp, req->dst, padded_assoclen);
+		//printk("%li, %li\n", sg->length, req->cryptlen);
+		if (sg->length >= req->cryptlen) {
+			//printk("Crypt is in one buf %li, %li\n", sg->length, sg->offset);
+			struct scatterlist tmp2[2];
+			dst = page_address(sg_page(sg)) + sg->offset;
+			sg = scatterwalk_ffwd(tmp2, sg, req->cryptlen);
+			if (sg->length >= auth_tag_len) {
+				//printk("tag is in one buf %li, %li\n", sg->length, sg->offset);
+				tag = page_address(sg_page(sg)) + sg->offset;
+				one_entry_in_dst = 1;
+			}
+		}
+	}
+
 	if (sg_is_last(req->src) &&
 	    req->src->offset + req->src->length <= PAGE_SIZE &&
 	    sg_is_last(req->dst) &&
@@ -968,37 +989,46 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
 		assoc = scatterwalk_map(&src_sg_walk);
 		src = assoc + padded_assoclen;
 		dst = src;
+		tag = dst + req->cryptlen;
 		if (unlikely(req->src != req->dst)) {
 			scatterwalk_start(&dst_sg_walk, req->dst);
 			dst = scatterwalk_map(&dst_sg_walk) + padded_assoclen;
 		}
+	} else if (one_entry_in_dst) {
+		//printk("One big buffer %p %li\n", page_address(sg_page(req->dst)), req->dst->offset);
+		//scatterwalk_start(&dst_sg_walk, req->dst);
+		//assoc = scatterwalk_map(&dst_sg_walk);
+		scatterwalk_map_and_copy(dst, req->src, padded_assoclen, req->cryptlen, 0);
+		//printk("Copied\n");
+		src = dst;
+/*
+		assoc = page_address(sg_page(req->src)) + req->src->offset;
+		dst = page_address(sg_page(sg_next(req->dst))) + sg_next(req->dst)->offset;
+		tag = page_address(sg_page(sg_next(sg_next(req->dst))));
+		scatterwalk_map_and_copy(dst, sg_next(req->src), 0, req->cryptlen, 0);
+		src = dst;
+*/
 	} else {
+		//printk("Have to kmalloc. Needed: %li, had %li\n",
+		//       req->cryptlen + auth_tag_len + padded_assoclen, req->dst->length);
 		assoc = kmalloc(req->cryptlen + auth_tag_len + padded_assoclen, GFP_ATOMIC);
 		scatterwalk_map_and_copy(assoc, req->src, 0, padded_assoclen + req->cryptlen, 0);
 		src = assoc + padded_assoclen;
 		dst = src;
-/*
-		assoc = page_address(sg_page(req->src)) + req->src->offset;
-		dst = page_address(sg_page(sg_next(req->dst))) + sg_next(req->dst)->offset;
-		tag = page_address(sg_page(sg_next(sg_next(req->dst))));
-		scatterwalk_map_and_copy(dst, sg_next(req->src), 0, req->cryptlen, 0);
-		src = dst;
-*/
+		tag = dst + req->cryptlen;
 	}
 
 	kernel_fpu_begin();
-	/*
 	aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
 		ctx->hash_subkey, assoc, req->assoclen - 8, tag,
 		auth_tag_len);
-	*/
-	aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
-		ctx->hash_subkey, assoc, req->assoclen - 8,
-		dst + req->cryptlen, auth_tag_len);
 	kernel_fpu_end();
+	//printk("Done encrypt\n");
 
 	/* The authTag (aka the Integrity Check Value) needs to be written
 	 * back to the packet.
 	 */
@@ -1011,6 +1041,11 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
 		scatterwalk_unmap(assoc);
 		scatterwalk_advance(&src_sg_walk, req->src->length);
 		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
+	} else if (one_entry_in_dst) {
+		//printk("One entry in dst\n");
+		//scatterwalk_unmap(assoc);
+		//scatterwalk_advance(&dst_sg_walk, req->dst->length);
+		//scatterwalk_done(&dst_sg_walk, 1, 0);
 	} else {
 		scatterwalk_map_and_copy(dst, req->dst, padded_assoclen,
 					 req->cryptlen + auth_tag_len, 1);
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 70427f6..abae665 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -494,7 +494,12 @@ static unsigned int aead_poll(struct file *file, struct socket *sock,
 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
 {
 	put_page(spd->pages[i]);
-	//__free_pages(spd->pages[i], 2);
+}
+
+static void sock_spd_release_pages(struct splice_pipe_desc *spd, unsigned int i)
+{
+	printk("Freed pages %u\n", i);
+	__free_pages(spd->pages[i], 3);
 }
 
 static ssize_t alg_splice_read(struct socket *sock, loff_t *ppos,
@@ -569,12 +574,26 @@ static ssize_t alg_splice_read(struct socket *sock, loff_t *ppos,
 		.nr_pages = 0,
 	};
 
+	// TODO: spd_release is set once for the whole pipe, so any single-page
+	// fallback entries below would wrongly be freed as order-3 blocks
+
 	/* convert iovecs of output buffers into scatterlists */
 	while (1) {
-		size_t seglen = min_t(size_t, PAGE_SIZE,
+		size_t seglen = min_t(size_t, PAGE_SIZE * 8,
 				      (outlen - usedpages));
 
-		pages[cnt] = alloc_page(GFP_KERNEL);
+		if (seglen > PAGE_SIZE) {
+			pages[cnt] = alloc_pages(GFP_KERNEL | __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY, 3);
+			if (pages[cnt] != NULL) {
+				//printk("Alloced one page %i of size %li\n", cnt, seglen);
+				spd.spd_release = sock_spd_release_pages;
+			} else {
+				printk("Could not alloc one page\n");
+				pages[cnt] = alloc_page(GFP_KERNEL);
+				seglen = PAGE_SIZE;
+			}
+		} else {
+			pages[cnt] = alloc_page(GFP_KERNEL);
+		}
 		partial[cnt].offset = 0;
 		partial[cnt].len = seglen;
diff --git a/crypto/algif_tls.c b/crypto/algif_tls.c
index 3cc882c..a641d85 100644
--- a/crypto/algif_tls.c
+++ b/crypto/algif_tls.c
@@ -476,12 +476,18 @@ static int do_tls_kernel_sendpage(struct sock *sk)
 	/* 		ctx->tcsgl[ctx->page_to_send - 1].length, */
 	/* 		page_address(sg_page(&ctx->tcsgl[ctx->page_to_send - 1]))); */
 
-	if (ctx->page_to_send == 20 && 0) {
-		err = kernel_sendpage(ctx->sock->sk_socket,
-			sg_page(&ctx->tcsgl[ctx->page_to_send - 1]),
+	if (ctx->page_to_send == 2) {
+		int length = ctx->tcsgl[ctx->page_to_send - 1].length;
+		for (i = 0; i < 4; i++) {
+			err = kernel_sendpage(
+				ctx->sock->sk_socket,
+				sg_page(&ctx->tcsgl[ctx->page_to_send - 1]) + i,
 				ctx->tcsgl[ctx->page_to_send - 1].offset,
-				ctx->tcsgl[ctx->page_to_send - 1].length,
+				min_t(int, length, PAGE_SIZE),
 				MSG_DONTWAIT | MSG_MORE);
+			length -= PAGE_SIZE;
+			if (length <= 0) break;
+		}
 	} else {
 		struct kvec iov;
 		iov.iov_base = page_address(sg_page(&ctx->tcsgl[ctx->page_to_send - 1]))
@@ -508,8 +514,12 @@ static int do_tls_kernel_sendpage(struct sock *sk)
 	increment_seqno(ctx->iv_send);
 
 	//for (i = 0; i < ctx->tcsgl_size; i++)
-	//put_page(sg_page(&ctx->tcsgl[1]));
-	//__free_pages(sg_page(&ctx->tcsgl[1]), 2);
+	put_page(sg_page(&ctx->tcsgl[1]));
+	//for (i = 0; i < 4; i++) {
+	//	put_page(sg_page(&ctx->tcsgl[1]) + i);
+	//}
+
+	//__free_pages(sg_page(&ctx->tcsgl[1]), 2);
 	//printk("Finished sending\n");
 
 	tls_wmem_wakeup(sk);
@@ -561,7 +571,7 @@ static int do_tls_sendpage(struct sock *sk)
 
 	//printk("pfrag: %i, %i, %p\n", pfrag->size, pfrag->offset, pfrag->page);
 //	pfrag->offset += 4096;
-//	sg_assign_page(sg, alloc_pages(GFP_KERNEL, 2));
+	sg_assign_page(sg, alloc_pages(GFP_KERNEL, 2));
 	if (!sg_page(sg)) {
 		err = -ENOMEM;
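
Note, not part of the patch: the contiguity test that the aesni-intel_glue.c hunk open-codes is easier to follow pulled out into a helper. The sketch below is illustrative only; the name rfc4106_dst_is_linear() and its out-parameters are hypothetical, while scatterwalk_ffwd(), sg_page() and page_address() are the same in-kernel APIs the patch calls. Like the patch's fast path, it assumes the scatterlist entries point at lowmem pages that page_address() can reach.

#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>

/*
 * Sketch: return true when the associated data, the ciphertext and the
 * auth tag each fit inside a single scatterlist entry, filling in direct
 * pointers to all three regions.  Mirrors the one_entry_in_dst detection
 * in helper_rfc4106_encrypt(); name and signature are hypothetical.
 */
static bool rfc4106_dst_is_linear(struct scatterlist *src,
				  struct scatterlist *dst,
				  unsigned int padded_assoclen,
				  unsigned int cryptlen,
				  unsigned int auth_tag_len,
				  u8 **assocp, u8 **dstp, u8 **tagp)
{
	struct scatterlist tmp[2], tmp2[2];
	struct scatterlist *sg;

	/* The associated data must lie in the first source entry. */
	if (src->length < padded_assoclen)
		return false;
	*assocp = page_address(sg_page(src)) + src->offset;

	/* The ciphertext must be one contiguous run right after the AAD. */
	sg = scatterwalk_ffwd(tmp, dst, padded_assoclen);
	if (sg->length < cryptlen)
		return false;
	*dstp = page_address(sg_page(sg)) + sg->offset;

	/* The tag must directly follow the ciphertext in one entry. */
	sg = scatterwalk_ffwd(tmp2, sg, cryptlen);
	if (sg->length < auth_tag_len)
		return false;
	*tagp = page_address(sg_page(sg)) + sg->offset;

	return true;
}

When all three checks pass, aesni_gcm_enc_tfm() can write the ciphertext and tag straight into the destination pages, skipping both the scatterwalk mapping and the kmalloc()+copy slow paths; that is what the one_entry_in_dst flag gates in the patch above.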