Add patch to allow buffered mnemonics as valid type as args on some methods #1

Merged 9 commits on Apr 13, 2022
59 changes: 51 additions & 8 deletions src/index.js
@@ -49,8 +49,12 @@ function deriveChecksumBits(entropyBuffer) {
 function salt(password) {
     return 'mnemonic' + (password || '');
 }
+// When the mnemonic argument is passed as a buffer, it should be
+// a buffer of a string that is normalized to NFKD format
 function mnemonicToSeedSync(mnemonic, password) {
-    const mnemonicBuffer = Buffer.from(normalize(mnemonic), 'utf8');
+    const mnemonicBuffer = typeof mnemonic === 'string'
+        ? Buffer.from(normalize(mnemonic), 'utf8')
+        : mnemonic;
     const saltBuffer = Buffer.from(salt(normalize(password)), 'utf8');
     return pbkdf2_1.pbkdf2Sync(mnemonicBuffer, saltBuffer, 2048, 64, 'sha512');
 }
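
For callers, the upshot is that a pre-normalized mnemonic can now be passed as a Buffer and yields the same seed as the string form. A minimal sketch, assuming the package is loaded as bip39 and reusing the 'zoo … wrong' phrase from the README test further down:

    const bip39 = require('bip39');

    const mnemonic = 'zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo wrong';
    // The buffer must contain the NFKD-normalized UTF-8 bytes of the phrase.
    const asBuffer = Buffer.from(mnemonic.normalize('NFKD'), 'utf8');

    const seedFromString = bip39.mnemonicToSeedSync(mnemonic, 'password');
    const seedFromBuffer = bip39.mnemonicToSeedSync(asBuffer, 'password');
    console.log(seedFromString.equals(seedFromBuffer)); // true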
@@ -63,19 +67,36 @@ function mnemonicToSeed(mnemonic, password) {
     });
 }
 exports.mnemonicToSeed = mnemonicToSeed;
+// When the mnemonic argument is passed as a buffer, it should be
+// a buffer of a string that is normalized to NFKD format
 function mnemonicToEntropy(mnemonic, wordlist) {
     wordlist = wordlist || DEFAULT_WORDLIST;
     if (!wordlist) {
         throw new Error(WORDLIST_REQUIRED);
     }
-    const words = normalize(mnemonic).split(' ');
+    const mnemonicAsBuffer = typeof mnemonic === 'string'
+        ? Buffer.from(normalize(mnemonic), 'utf8')
+        : mnemonic;
+    const words = [];
+    let currentWord = [];
+    for (const byte of mnemonicAsBuffer.values()) {
+        // split at space or \u3000 (ideographic space, for Japanese wordlists)
+        if (byte === 0x20 || byte === 0x3000) {
+            words.push(Buffer.from(currentWord));
+            currentWord = [];
+        }
+        else {
+            currentWord.push(byte);
+        }
+    }
+    words.push(Buffer.from(currentWord));
     if (words.length % 3 !== 0) {
         throw new Error(INVALID_MNEMONIC);
     }
     // convert word indices to 11 bit binary strings
     const bits = words
         .map((word) => {
-        const index = wordlist.indexOf(word);
+        const index = wordlist.indexOf(word.toString('utf8'));
         if (index === -1) {
             throw new Error(INVALID_MNEMONIC);
         }
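
A note on the byte-level split: NFKD normalization maps U+3000 (ideographic space) to an ordinary U+0020 space, so a correctly pre-normalized buffer never contains the three-byte UTF-8 encoding of U+3000. The byte === 0x3000 branch can in fact never match, since a single byte value is at most 0xFF; in practice the 0x20 comparison does all the work. A quick check of the normalization fact this relies on:

    // NFKD turns the ideographic space into a plain ASCII space.
    console.log('\u3000'.normalize('NFKD') === ' '); // true
    // Without normalization it occupies three bytes in UTF-8.
    console.log(Buffer.from('\u3000', 'utf8')); // <Buffer e3 80 80>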
@@ -127,13 +148,35 @@ function entropyToMnemonic(entropy, wordlist) {
     const checksumBits = deriveChecksumBits(entropy);
     const bits = entropyBits + checksumBits;
     const chunks = bits.match(/(.{1,11})/g);
-    const words = chunks.map((binary) => {
+    const wordsAsBuffers = chunks.map((binary) => {
         const index = binaryToByte(binary);
-        return wordlist[index];
+        wordlist = wordlist || [];
+        return Buffer.from(normalize(wordlist[index]), 'utf8');
     });
-    return wordlist[0] === '\u3042\u3044\u3053\u304f\u3057\u3093' // Japanese wordlist
-        ? words.join('\u3000')
-        : words.join(' ');
+    const separator = wordlist[0] === '\u3042\u3044\u3053\u304f\u3057\u3093' // Japanese wordlist
+        ? '\u3000'
+        : ' ';
+    const separatorByteLength = Buffer.from(separator, 'utf8').length;
+    const bufferSize = wordsAsBuffers.reduce((currentBufferSize, wordAsBuffer, i) => {
+        const shouldAddSeparator = i < wordsAsBuffers.length - 1;
+        return (currentBufferSize +
+            wordAsBuffer.length +
+            (shouldAddSeparator ? separatorByteLength : 0));
+    }, 0);
+    const { workingBuffer } = wordsAsBuffers.reduce((result, wordAsBuffer, i) => {
+        const shouldAddSeparator = i < wordsAsBuffers.length - 1;
+        result.workingBuffer.set(wordAsBuffer, result.offset);
+        if (shouldAddSeparator) {
+            result.workingBuffer.write(separator, result.offset + wordAsBuffer.length, separatorByteLength, 'utf8');
+        }
+        return {
+            workingBuffer: result.workingBuffer,
+            offset: result.offset +
+                wordAsBuffer.length +
+                (shouldAddSeparator ? separatorByteLength : 0),
+        };
+    }, { workingBuffer: Buffer.alloc(bufferSize), offset: 0 });
+    return workingBuffer;
 }
 exports.entropyToMnemonic = entropyToMnemonic;
 function generateMnemonic(strength, rng, wordlist) {
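With this patch, entropyToMnemonic (and generateMnemonic further down) hand back the phrase as a UTF-8 Buffer instead of a string, so callers that want text must decode explicitly, as the updated tests do. A small sketch of the new call pattern; the zeroing step is one plausible reason to prefer a Buffer return:

    const bip39 = require('bip39');

    const mnemonicBuffer = bip39.entropyToMnemonic('00000000000000000000000000000000');
    console.log(mnemonicBuffer.toString());
    // 'abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about'

    // Unlike an immutable string, the buffer can be wiped after use.
    mnemonicBuffer.fill(0);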
23 changes: 14 additions & 9 deletions test/index.js
@@ -4,7 +4,7 @@ var WORDLISTS = {
   english: require('../src/wordlists/english.json'),
   japanese: require('../src/wordlists/japanese.json'),
   custom: require('./wordlist.json')
-}
+};

 var vectors = require('./vectors.json')
 var test = require('tape')
@@ -15,22 +15,27 @@ function testVector (description, wordlist, password, v, i) {
   var vseedHex = v[2]

   test('for ' + description + '(' + i + '), ' + ventropy, function (t) {
-    t.plan(6)
+    t.plan(8)

     t.equal(bip39.mnemonicToEntropy(vmnemonic, wordlist), ventropy, 'mnemonicToEntropy returns ' + ventropy.slice(0, 40) + '...')
+    // and mnemonicToEntropy should work with mnemonic arg as type buffer
+    t.equal(bip39.mnemonicToEntropy(Buffer.from(vmnemonic.normalize('NFKD'), 'utf8'), wordlist), ventropy, 'mnemonicToEntropy returns ' + ventropy.slice(0, 40) + '...')

     t.equal(bip39.mnemonicToSeedSync(vmnemonic, password).toString('hex'), vseedHex, 'mnemonicToSeedSync returns ' + vseedHex.slice(0, 40) + '...')
+    // and mnemonicToSeedSync should work with mnemonic arg as type buffer
+    t.equal(bip39.mnemonicToSeedSync(Buffer.from(vmnemonic.normalize('NFKD'), 'utf8'), password).toString('hex'), vseedHex, 'mnemonicToSeedSync returns ' + vseedHex.slice(0, 40) + '...')

     bip39.mnemonicToSeed(vmnemonic, password).then(function (asyncSeed) {
       t.equal(asyncSeed.toString('hex'), vseedHex, 'mnemonicToSeed returns ' + vseedHex.slice(0, 40) + '...')
     })
-    t.equal(bip39.entropyToMnemonic(ventropy, wordlist), vmnemonic, 'entropyToMnemonic returns ' + vmnemonic.slice(0, 40) + '...')

+    t.equal(bip39.entropyToMnemonic(ventropy, wordlist).toString(), vmnemonic, 'entropyToMnemonic returns ' + vmnemonic.slice(0, 40) + '...')
     function rng () { return Buffer.from(ventropy, 'hex') }
-    t.equal(bip39.generateMnemonic(undefined, rng, wordlist), vmnemonic, 'generateMnemonic returns RNG entropy unmodified')
+    t.equal(bip39.generateMnemonic(undefined, rng, wordlist).toString(), vmnemonic, 'generateMnemonic returns RNG entropy unmodified')
     t.equal(bip39.validateMnemonic(vmnemonic, wordlist), true, 'validateMnemonic returns true')
   })
 }

-vectors.english.forEach(function (v, i) { testVector('English', undefined, 'TREZOR', v, i) })
+vectors.english.forEach(function(v, i) { testVector('English', undefined, 'TREZOR', v, i) })
 vectors.japanese.forEach(function (v, i) { testVector('Japanese', WORDLISTS.japanese, '㍍ガバヴァぱばぐゞちぢ十人十色', v, i) })
 vectors.custom.forEach(function (v, i) { testVector('Custom', WORDLISTS.custom, undefined, v, i) })

@@ -51,12 +56,12 @@ test('setDefaultWordlist changes default wordlist', function (t) {
   const italian = bip39.getDefaultWordlist()
   t.equal(italian, 'italian')

-  const phraseItalian = bip39.entropyToMnemonic('00000000000000000000000000000000')
+  const phraseItalian = bip39.entropyToMnemonic('00000000000000000000000000000000').toString();
   t.equal(phraseItalian.slice(0, 5), 'abaco')

   bip39.setDefaultWordlist('english')

-  const phraseEnglish = bip39.entropyToMnemonic('00000000000000000000000000000000')
+  const phraseEnglish = bip39.entropyToMnemonic('00000000000000000000000000000000').toString();
   t.equal(phraseEnglish.slice(0, 7), 'abandon')
 })

@@ -106,7 +111,7 @@ test('UTF8 passwords', function (t) {
 })

 test('generateMnemonic can vary entropy length', function (t) {
-  var words = bip39.generateMnemonic(160).split(' ')
+  var words = bip39.generateMnemonic(160).toString().split(' ')

   t.plan(1)
   t.equal(words.length, 15, 'can vary generated entropy bit length')
4 changes: 2 additions & 2 deletions test/readme.js
@@ -6,7 +6,7 @@ const test = require('tape')
 test('README example 1', function (t) {
   // defaults to BIP39 English word list
   const entropy = 'ffffffffffffffffffffffffffffffff'
-  const mnemonic = bip39.entropyToMnemonic(entropy)
+  const mnemonic = bip39.entropyToMnemonic(entropy).toString();

   t.plan(2)
   t.equal(mnemonic, 'zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo wrong')
@@ -24,7 +24,7 @@ test('README example 2', function (t) {
   const proxiedbip39 = proxyquire('../', stub)

   // mnemonic strength defaults to 128 bits
-  const mnemonic = proxiedbip39.generateMnemonic()
+  const mnemonic = proxiedbip39.generateMnemonic().toString();

   t.plan(2)
   t.equal(mnemonic, 'imitate robot frame trophy nuclear regret saddle around inflict case oil spice')
97 changes: 83 additions & 14 deletions ts_src/index.ts
@@ -69,11 +69,17 @@ function salt(password?: string): string {
   return 'mnemonic' + (password || '');
 }

+// When the mnemonic argument is passed as a buffer, it should be
+// a buffer of a string that is normalized to NFKD format
 export function mnemonicToSeedSync(
-  mnemonic: string,
+  mnemonic: string | Buffer,
   password?: string,
 ): Buffer {
-  const mnemonicBuffer = Buffer.from(normalize(mnemonic), 'utf8');
+  const mnemonicBuffer =
+    typeof mnemonic === 'string'
+      ? Buffer.from(normalize(mnemonic), 'utf8')
+      : mnemonic;
+
   const saltBuffer = Buffer.from(salt(normalize(password)), 'utf8');

   return pbkdf2Sync(mnemonicBuffer, saltBuffer, 2048, 64, 'sha512');
@@ -92,25 +98,45 @@ export function mnemonicToSeed(
   );
 }

+// When the mnemonic argument is passed as a buffer, it should be
+// a buffer of a string that is normalized to NFKD format
 export function mnemonicToEntropy(
-  mnemonic: string,
+  mnemonic: string | Buffer,
   wordlist?: string[],
 ): string {
   wordlist = wordlist || DEFAULT_WORDLIST;
   if (!wordlist) {
     throw new Error(WORDLIST_REQUIRED);
   }

-  const words = normalize(mnemonic).split(' ');
+  const mnemonicAsBuffer =
+    typeof mnemonic === 'string'
+      ? Buffer.from(normalize(mnemonic), 'utf8')
+      : mnemonic;
+
+  const words = [];
+  let currentWord = [];
+  for (const byte of mnemonicAsBuffer.values()) {
+    // split at space or \u3000 (ideographic space, for Japanese wordlists)
+    if (byte === 0x20 || byte === 0x3000) {
+      words.push(Buffer.from(currentWord));
+      currentWord = [];
+    } else {
+      currentWord.push(byte);
+    }
+  }
+
+  words.push(Buffer.from(currentWord));
+
   if (words.length % 3 !== 0) {
     throw new Error(INVALID_MNEMONIC);
   }

   // convert word indices to 11 bit binary strings
   const bits = words
     .map(
-      (word: string): string => {
-        const index = wordlist!.indexOf(word);
+      (word: Buffer): string => {
+        const index = wordlist!.indexOf(word.toString('utf8'));
         if (index === -1) {
           throw new Error(INVALID_MNEMONIC);
         }
@@ -149,7 +175,7 @@ export function mnemonicToEntropy(
 export function entropyToMnemonic(
   entropy: Buffer | string,
   wordlist?: string[],
-): string {
+): Buffer {
   if (!Buffer.isBuffer(entropy)) {
     entropy = Buffer.from(entropy, 'hex');
   }
@@ -174,23 +200,66 @@

   const bits = entropyBits + checksumBits;
   const chunks = bits.match(/(.{1,11})/g)!;
-  const words = chunks.map(
-    (binary: string): string => {
+  const wordsAsBuffers = chunks.map(
+    (binary: string): Buffer => {
       const index = binaryToByte(binary);
-      return wordlist![index];
+      wordlist = wordlist || [];
+      return Buffer.from(normalize(wordlist[index]), 'utf8');
     },
   );

-  return wordlist[0] === '\u3042\u3044\u3053\u304f\u3057\u3093' // Japanese wordlist
-    ? words.join('\u3000')
-    : words.join(' ');
+  const separator =
+    wordlist[0] === '\u3042\u3044\u3053\u304f\u3057\u3093' // Japanese wordlist
+      ? '\u3000'
+      : ' ';
+  const separatorByteLength = Buffer.from(separator, 'utf8').length;
+
+  const bufferSize = wordsAsBuffers.reduce(
+    (currentBufferSize: number, wordAsBuffer: Buffer, i: number): number => {
+      const shouldAddSeparator = i < wordsAsBuffers.length - 1;
+      return (
+        currentBufferSize +
+        wordAsBuffer.length +
+        (shouldAddSeparator ? separatorByteLength : 0)
+      );
+    },
+    0,
+  );
+
+  const { workingBuffer }: { workingBuffer: Buffer } = wordsAsBuffers.reduce(
+    (
+      result: { workingBuffer: Buffer; offset: number },
+      wordAsBuffer: Buffer,
+      i: number,
+    ): { workingBuffer: Buffer; offset: number } => {
+      const shouldAddSeparator = i < wordsAsBuffers.length - 1;
+      result.workingBuffer.set(wordAsBuffer, result.offset);
+      if (shouldAddSeparator) {
+        result.workingBuffer.write(
+          separator,
+          result.offset + wordAsBuffer.length,
+          separatorByteLength,
+          'utf8',
+        );
+      }
+      return {
+        workingBuffer: result.workingBuffer,
+        offset:
+          result.offset +
+          wordAsBuffer.length +
+          (shouldAddSeparator ? separatorByteLength : 0),
+      };
+    },
+    { workingBuffer: Buffer.alloc(bufferSize), offset: 0 },
+  );
+  return workingBuffer;
 }

 export function generateMnemonic(
   strength?: number,
   rng?: (size: number) => Buffer,
   wordlist?: string[],
-): string {
+): Buffer {
   strength = strength || 128;
   if (strength % 32 !== 0) {
     throw new TypeError(INVALID_ENTROPY);
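The assembly above runs two reduce passes: one to size a single preallocated Buffer, one to fill it in place. Functionally it amounts to joining the word buffers with a separator; a simpler illustrative sketch (not the merged code) using Buffer.concat:

    // Hypothetical equivalent of the merged assembly logic, for illustration only.
    function joinWordBuffers(wordsAsBuffers, separator) {
      const sep = Buffer.from(separator, 'utf8');
      const parts = [];
      wordsAsBuffers.forEach((word, i) => {
        parts.push(word);
        if (i < wordsAsBuffers.length - 1) {
          parts.push(sep);
        }
      });
      // Buffer.concat copies everything into one freshly allocated buffer.
      return Buffer.concat(parts);
    }

Preallocating with Buffer.alloc(bufferSize) and writing in place, as the PR does, presumably avoids the intermediate parts array; either way the final phrase ends up in a single Buffer the caller can later zero.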
8 changes: 4 additions & 4 deletions types/index.d.ts
@@ -1,9 +1,9 @@
 /// <reference types="node" />
-export declare function mnemonicToSeedSync(mnemonic: string, password?: string): Buffer;
+export declare function mnemonicToSeedSync(mnemonic: string | Buffer, password?: string): Buffer;
 export declare function mnemonicToSeed(mnemonic: string, password?: string): Promise<Buffer>;
-export declare function mnemonicToEntropy(mnemonic: string, wordlist?: string[]): string;
-export declare function entropyToMnemonic(entropy: Buffer | string, wordlist?: string[]): string;
-export declare function generateMnemonic(strength?: number, rng?: (size: number) => Buffer, wordlist?: string[]): string;
+export declare function mnemonicToEntropy(mnemonic: string | Buffer, wordlist?: string[]): string;
+export declare function entropyToMnemonic(entropy: Buffer | string, wordlist?: string[]): Buffer;
+export declare function generateMnemonic(strength?: number, rng?: (size: number) => Buffer, wordlist?: string[]): Buffer;
 export declare function validateMnemonic(mnemonic: string, wordlist?: string[]): boolean;
 export declare function setDefaultWordlist(language: string): void;
 export declare function getDefaultWordlist(): string;
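For downstream consumers, the changed declarations make the Buffer returns part of the public contract, so existing string-based call sites need an explicit decode when upgrading. A sketch of the migration, mirroring what the updated tests do:

    const bip39 = require('bip39');

    // generateMnemonic previously returned a string; it now returns a Buffer,
    // so string operations need an explicit decode first.
    const mnemonic = bip39.generateMnemonic(); // 128-bit default strength
    const words = mnemonic.toString('utf8').split(' ');
    console.log(words.length); // 12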