Skip to content
This repository
Browse code

random: mix in architectural randomness in extract_buf()

commit d2e7c96 upstream.

Mix in any architectural randomness in extract_buf() instead of
xfer_secondary_pool().  This allows us to mix in more architectural
randomness, and it also makes xfer_secondary_pool() faster, moving a
tiny bit of additional CPU overhead to the process which is extracting
the randomness.

[ Commit description modified by tytso to remove an extended
  advertisement for the RDRAND instruction. ]

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Cc: DJ Johnston <dj.johnston@intel.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
  • Loading branch information...
commit 7499bd63d777215d16810b6fc9bd58fe83b4e576 1 parent 46b4d87
H. Peter Anvin authored Ben Hutchings committed

Showing 1 changed file with 32 additions and 24 deletions. Show diff stats Hide diff stats

  1. +32 24 drivers/char/random.c
56 drivers/char/random.c
@@ -274,6 +274,8 @@
274 274 #define SEC_XFER_SIZE 512
275 275 #define EXTRACT_SIZE 10
276 276
  277 +#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
  278 +
277 279 /*
278 280 * The minimum number of bits of entropy before we wake up a read on
279 281 * /dev/random. Should be enough to do a significant reseed.
@@ -835,11 +837,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
835 837 */
836 838 static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
837 839 {
838   - union {
839   - __u32 tmp[OUTPUT_POOL_WORDS];
840   - long hwrand[4];
841   - } u;
842   - int i;
  840 + __u32 tmp[OUTPUT_POOL_WORDS];
843 841
844 842 if (r->pull && r->entropy_count < nbytes * 8 &&
845 843 r->entropy_count < r->poolinfo->POOLBITS) {
@@ -850,23 +848,17 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
850 848 /* pull at least as many as BYTES as wakeup BITS */
851 849 bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
852 850 /* but never more than the buffer size */
853   - bytes = min_t(int, bytes, sizeof(u.tmp));
  851 + bytes = min_t(int, bytes, sizeof(tmp));
854 852
855 853 DEBUG_ENT("going to reseed %s with %d bits "
856 854 "(%d of %d requested)\n",
857 855 r->name, bytes * 8, nbytes * 8, r->entropy_count);
858 856
859   - bytes = extract_entropy(r->pull, u.tmp, bytes,
  857 + bytes = extract_entropy(r->pull, tmp, bytes,
860 858 random_read_wakeup_thresh / 8, rsvd);
861   - mix_pool_bytes(r, u.tmp, bytes, NULL);
  859 + mix_pool_bytes(r, tmp, bytes, NULL);
862 860 credit_entropy_bits(r, bytes*8);
863 861 }
864   - kmemcheck_mark_initialized(&u.hwrand, sizeof(u.hwrand));
865   - for (i = 0; i < 4; i++)
866   - if (arch_get_random_long(&u.hwrand[i]))
867   - break;
868   - if (i)
869   - mix_pool_bytes(r, &u.hwrand, sizeof(u.hwrand), 0);
870 862 }
871 863
872 864 /*
@@ -923,15 +915,19 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
923 915 static void extract_buf(struct entropy_store *r, __u8 *out)
924 916 {
925 917 int i;
926   - __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
  918 + union {
  919 + __u32 w[5];
  920 + unsigned long l[LONGS(EXTRACT_SIZE)];
  921 + } hash;
  922 + __u32 workspace[SHA_WORKSPACE_WORDS];
927 923 __u8 extract[64];
928 924 unsigned long flags;
929 925
930 926 /* Generate a hash across the pool, 16 words (512 bits) at a time */
931   - sha_init(hash);
  927 + sha_init(hash.w);
932 928 spin_lock_irqsave(&r->lock, flags);
933 929 for (i = 0; i < r->poolinfo->poolwords; i += 16)
934   - sha_transform(hash, (__u8 *)(r->pool + i), workspace);
  930 + sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
935 931
936 932 /*
937 933 * We mix the hash back into the pool to prevent backtracking
@@ -942,14 +938,14 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
942 938 * brute-forcing the feedback as hard as brute-forcing the
943 939 * hash.
944 940 */
945   - __mix_pool_bytes(r, hash, sizeof(hash), extract);
  941 + __mix_pool_bytes(r, hash.w, sizeof(hash.w), extract);
946 942 spin_unlock_irqrestore(&r->lock, flags);
947 943
948 944 /*
949 945 * To avoid duplicates, we atomically extract a portion of the
950 946 * pool while mixing, and hash one final time.
951 947 */
952   - sha_transform(hash, extract, workspace);
  948 + sha_transform(hash.w, extract, workspace);
953 949 memset(extract, 0, sizeof(extract));
954 950 memset(workspace, 0, sizeof(workspace));
955 951
@@ -958,11 +954,23 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
958 954 * pattern, we fold it in half. Thus, we always feed back
959 955 * twice as much data as we output.
960 956 */
961   - hash[0] ^= hash[3];
962   - hash[1] ^= hash[4];
963   - hash[2] ^= rol32(hash[2], 16);
964   - memcpy(out, hash, EXTRACT_SIZE);
965   - memset(hash, 0, sizeof(hash));
  957 + hash.w[0] ^= hash.w[3];
  958 + hash.w[1] ^= hash.w[4];
  959 + hash.w[2] ^= rol32(hash.w[2], 16);
  960 +
  961 + /*
  962 + * If we have a architectural hardware random number
  963 + * generator, mix that in, too.
  964 + */
  965 + for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
  966 + unsigned long v;
  967 + if (!arch_get_random_long(&v))
  968 + break;
  969 + hash.l[i] ^= v;
  970 + }
  971 +
  972 + memcpy(out, &hash, EXTRACT_SIZE);
  973 + memset(&hash, 0, sizeof(hash));
966 974 }
967 975
968 976 static ssize_t extract_entropy(struct entropy_store *r, void *buf,

0 comments on commit 7499bd6

Please sign in to comment.
Something went wrong with that request. Please try again.