-
Notifications
You must be signed in to change notification settings - Fork 0
Draft VM approach
kildom edited this page Dec 10, 2019
·
8 revisions
Some Program:
/* Copyrights
XORCOL 0 @AUX > 0 @KEY // TODO: XORCOL and COPYCOL param sometimes refers to index, sometimes to column number.
XORCOL 0 @KEY > 1 @KEY // Should be column number to support keys longer than 128 bits
*/
--------------------------------
ROWSUB:
ROTSUB 0 @ST, 4
ROTSUB 1 @ST, 4
ROT 1 @ST, 4
ROTSUB 14 @ST, -4
ROT 14 @ST, -4
ROTSUB 15 @ST, -4
RET
MIXONECOL:
COPYCOL 0 @PTR > 0 @AUX
CALL 4, POLY
SETPTR 4 @PTR
RET
POLY:
ACCMUL 2, 0 @AUX > 0 @PTR
ACCMUL 3, 1 @AUX > 0 @PTR
ACCMUL 1, 2 @AUX > 0 @PTR
ACCMUL 1, 3 @AUX > 0 @PTR
ROT 0 @PTR, 1 // or 3 @PTR, -1
ROT 0 @AUX, 1 // or 3 @AUX, -1
RET
ADDKEY:
XORCOL 0 @KEY > 0 @ST
XORCOL 1 @KEY > 1 @ST
XORCOL 2 @KEY > 2 @ST
XORCOL 3 @KEY > 3 @ST
RET
INCKEY:
COPYCOL 12 @KEY > 0 @AUX
ROTSUB 0 @AUX, 1
ACCMUL 2, 4 @AUX > 0 @AUX
XORCOL 0 @AUX > 0 @KEY
XORCOL 0 @KEY > 1 @KEY
XORCOL 1 @KEY > 2 @KEY
XORCOL 2 @KEY > 3 @KEY
XORCOL 0 @AUX > 0 @AUX
ACCMUL 2, 4 @AUX > 3 @AUX
ROT 1 @AUX, 1
RET
#if AES_CIPHER
ROUND:
CALL ROWSUB
SETPTR 0 @ST
CALL 4, MIXONECOL
CALL ADDKEY
CALL INCKEY
RET
CIPHER:
CALL ADDKEY
CALL INCKEY
CALL 9, ROUND
CALL ROWSUB
CALL ADDKEY
RET
#if AES_256
ADDKEY_HI:
XORCOL 4 @KEY > 0 @ST
XORCOL 5 @KEY > 1 @ST
XORCOL 6 @KEY > 2 @ST
XORCOL 7 @KEY > 3 @ST
RET
ROUND256:
CALL ROWSUB
SETPTR 0 @ST
CALL 4, MIXONECOL
CALL ADDKEY_HI
CALL INCKEY
CALL ROWSUB
SETPTR 0 @ST
CALL 4, MIXONECOL
CALL ADDKEY
RET
CIPHER256:
CALL ADDKEY
CALL 8, ROUND256
CALL ROWSUB // ...
RET
#endif
REWIND:
CALL 10, INV_INCKEY
RET
#endif // AES_CIPHER
Compiler for Node.js
// Compiler for the AES VM assembly format ("aesasm").
// Reads code.aesasm, splits it at the dashed separator into a verbatim
// C header preamble and the bytecode section, translates every instruction
// line into an INSTR_* macro invocation, groups instructions into
// preprocessor-delimited "parts", and writes the result to code.inc.
const fs = require('fs');

let code = fs.readFileSync('code.aesasm', 'UTF-8');
let output;
[output, code] = code.split('--------------------------------');
// Strip ALL carriage returns: the old .replace('\r', '') removed only the first one.
output = output.replace(/\r/g, '').trim() + '\n\n';
code = code.trim();
code = code.split(/\r?\n/);

let part = { header: [], body: [], size: 0 };
let parts = [part];
let addresses = [];
let lineNumber = 0;

for (let line of code) {
    let m;
    let instr; // was assigned without declaration (implicit global; throws in strict mode / ESM)
    lineNumber++;
    line = line.trim();
    if (line == '') {
        continue;
    } else if (line.startsWith('#')) {
        // Preprocessor directive: starts a new part, unless the current part
        // has no instructions yet (consecutive directives share one part).
        if (part.body.length > 0) {
            part = { header: [], body: [], size: 0 };
            parts.push(part);
        }
        part.header.push(line);
        continue;
    }
    // Turn a trailing `// ...` comment into a C block comment so it can
    // survive inside the generated multi-line #define.
    let pos = line.indexOf('//');
    let comment = '';
    if (pos >= 0) {
        comment = ' /* ' + line.slice(pos + 2).trim() + ' */'; // .substr is deprecated
        line = line.substring(0, pos).trim();
    }
    if (line == '') {
        throw Error('Not implemented');
    } else if ((m = line.match(/^([A-Z_]+):$/i))) {
        // Label: record its bytecode address relative to the part's offset.
        if (parts.length > 1)
            addresses.push(`#define ADDR_${m[1]} (PART_${parts.length - 1}_OFFSET + ${part.size})`);
        else
            addresses.push(`#define ADDR_${m[1]} ${part.size}`);
        part.body.push(` /* ${m[1]}: */${comment}`);
        continue;
    } else if ((m = line.match(/^CALL +([0-9]+) *, *([A-Z_]+)$/i))) {
        instr = ` INSTR_CALL(${m[1]}, ADDR_${m[2]})`;
    } else if ((m = line.match(/^CALL +([A-Z_]+)$/i))) {
        // Plain CALL is a repeated CALL with count 1.
        instr = ` INSTR_CALL(1, ADDR_${m[1]})`;
    } else if ((m = line.match(/^ROTSUB +([0-9]+) +@(ST|KEY|AUX|PTR) *, *([0-9-]+)$/i))) {
        instr = ` INSTR_ROTSUB(1, ${m[1]}, SPACE_${m[2]}, ${m[3]})`;
    } else if ((m = line.match(/^ROT +([0-9]+) +@(ST|KEY|AUX|PTR) *, *([0-9-]+)$/i))) {
        // ROT shares the ROTSUB opcode with the substitution flag cleared.
        instr = ` INSTR_ROTSUB(0, ${m[1]}, SPACE_${m[2]}, ${m[3]})`;
    } else if ((m = line.match(/^RET$/i))) {
        instr = ` INSTR_RET()`;
    } else if ((m = line.match(/^SETPTR +([0-9]+) +@(ST|KEY|AUX|PTR)$/i))) {
        instr = ` INSTR_SETPTR(${m[1]}, SPACE_${m[2]})`;
    } else if ((m = line.match(/^COPYCOL +([0-9]+) +@(ST|KEY|AUX|PTR) +> +([0-9]+) +@(ST|KEY|AUX|PTR)$/i))) {
        instr = ` INSTR_COPYCOL(0, ${m[1]}, SPACE_${m[2]}, ${m[3]}, SPACE_${m[4]})`;
    } else if ((m = line.match(/^XORCOL +([0-9]+) +@(ST|KEY|AUX|PTR) +> +([0-9]+) +@(ST|KEY|AUX|PTR)$/i))) {
        // XORCOL shares the COPYCOL opcode with the xor flag set.
        instr = ` INSTR_COPYCOL(1, ${m[1]}, SPACE_${m[2]}, ${m[3]}, SPACE_${m[4]})`;
    } else if ((m = line.match(/^ACCMUL +([0-9]+) *, *([0-9]+) +@(ST|KEY|AUX|PTR) +> +([0-9]+) +@(ST|KEY|AUX|PTR)$/i))) {
        instr = ` INSTR_ACCMUL(${m[1]}, ${m[2]}, SPACE_${m[3]}, ${m[4]}, SPACE_${m[5]})`;
    } else {
        // Dump what was compiled so far to help locate the bad line.
        console.log(JSON.stringify(parts, null, 4));
        console.log(JSON.stringify(addresses, null, 4));
        throw Error(`Syntax error on line ${lineNumber}: "${line}"`);
    }
    part.body.push(instr + comment);
    part.size++;
}

output += '\n/********** Instructions separated by preprocessor **********/\n\n';
// A trailing header-only part (e.g. a final #endif) still gets its header
// emitted, but is excluded from offset/BODY generation via filledParts.
let filledParts = part.body.length > 0 ? parts.length : parts.length - 1;
for (let i = 0; i < parts.length; i++) {
    let part = parts[i];
    output += part.header.reduce((s, v) => s + `${v}\n`, '');
    if (part.body.length) {
        output += `#define PART_${i} \\\n${part.body.join(' \\\n')}\n`;
        output += `#define PART_${i}_SIZE ${part.size}\n`;
    }
}
output += '\n/********** Define undefined parts as empty **********/\n\n';
for (let i = 0; i < filledParts; i++) {
    output += `#ifndef PART_${i}\n`;
    output += `#define PART_${i}\n`;
    output += `#define PART_${i}_SIZE 0\n`;
    output += `#endif\n`;
}
output += '\n/********** Calculation of address offset of each part **********/\n\n';
output += `#define PART_1_OFFSET PART_0_SIZE\n`;
for (let i = 2; i < filledParts; i++) {
    output += `#define PART_${i}_OFFSET PART_${i - 1}_OFFSET + PART_${i - 1}_SIZE\n`;
}
output += `#define BODY_SIZE PART_${filledParts - 1}_OFFSET + PART_${filledParts - 1}_SIZE\n`;
output += '\n/********** Addresses of each bytecode symbol **********/\n\n';
output += addresses.join('\n') + '\n';
output += '\n/********** Body of program for VM **********/\n\n';
output += '#define BODY \\\n';
for (let i = 0; i < filledParts - 1; i++) {
    output += ` PART_${i} \\\n`;
}
output += ` PART_${filledParts - 1}\n`;
output += '\n/********** End of VM program **********/\n\n';
fs.writeFileSync('code.inc', output);
VM draft:
/* Draft bytecode image for the VM.
 * FIX: `program` takes a brace initializer, so it must be declared as an
 * array (`program[]`), not a single uint8_t.
 * NOTE(review): INSTR is defined here as a plain constant but used below
 * as a function-like macro -- the real packing macro
 * INSTR(op, space, opt, index, count) still needs to be written. */
#define INSTR 0x23
const uint8_t program[] = {
    INSTR(ROTSUB, ST, 0, 0, 4),
    INSTR(ROTSUB, ST, 0, 1, 4),
    //...
};
/* ROT/ROTSUB instruction handler (stub).
 * Per the comments below: reads 4 bytes starting at src[srcIndex],
 * stepping by dstIndex treated as a signed step, and applies the S-box
 * when opt is set.
 * NOTE(review): body not implemented yet in this draft. */
void rot(uint8_t* src, uint8_t* dst, uint8_t opt, uint8_t srcIndex, uint8_t dstIndex)
{
    // read 4 times: src[srcIndex] -> src[srcIndex] step by dstIndex with sign
    // sbox if opt
}
/* ACCMUL instruction handler (stub).
 * Per the comment below: accumulates a GF(2^8) product into the
 * destination -- dst[3 * opt] ^= dstIndex (GF2 multiply) src[srcIndex].
 * NOTE(review): body not implemented yet in this draft. */
void accmul(uint8_t* src, uint8_t* dst, uint8_t opt, uint8_t srcIndex, uint8_t dstIndex)
{
    // dst[3 * opt] ^= dstIndex *GF2MUL* src[srcIndex]
}
/* COPYCOL/XORCOL instruction handler (stub).
 * Copies a column from src to dst; when opt is set the column is XORed
 * into the destination instead of overwriting it (per the comment below).
 * NOTE(review): body not implemented yet in this draft. */
void colcopy(uint8_t* src, uint8_t* dst, uint8_t opt, uint8_t srcIndex, uint8_t dstIndex)
{
    // copy column from src to dest. If opt set xor with destination
}
/* SETPTR instruction handler: stores src + srcIndex into the VM's
 * pointer slot. Assumes dst points at the AUX area, so stepping back
 * from it by (AUX_OFFSET - PTR_PTR_OFFSET) pointer-sized slots reaches
 * the ptr-ptr slot of the VM state (see the VM_STATE layout below). */
void setptr(uint8_t* src, uint8_t* dst, uint8_t opt, uint8_t srcIndex, uint8_t dstIndex)
{
    uint8_t** slot = (uint8_t**)dst;
    slot -= AUX_OFFSET - PTR_PTR_OFFSET;
    *slot = &src[srcIndex];
}
/*
VM_STATE:
0 1 aes state ptr
2 3 aes key ptr
4 5 aux ptr
6 7 ptr ptr
8...AUX
TODO: SBOX ptr to support both SBOX and InvSBOX
*/
/* CALL instruction handler: executes the subroutine at the encoded
 * address srcIndex times (repeated-call form of CALL).
 * Assumes src points at the AUX area, so the VM state base can be
 * recovered by subtracting AUX_OFFSET. The target address is packed
 * from opt (low bits) and dstIndex (high nibble). */
void call(uint8_t* src, uint8_t* dst, uint8_t opt, uint8_t srcIndex, uint8_t dstIndex)
{
    uint8_t* state = src - AUX_OFFSET;
    uint8_t addr = ((opt & 0x7) << 1) | (dstIndex << 4);
    for (; srcIndex != 0; srcIndex--)
    {
        vm_internal(state, addr);
    }
}
/* VM entry point: builds the state block (see the VM_STATE comment
 * above: state ptr, key ptr, aux ptr, ptr-ptr, then AUX scratch) and
 * runs the bytecode starting at pc.
 * NOTE(review): `vm_state[2] = &vm_state[4]` stores a uint8_t** into a
 * uint8_t* slot -- deliberate punning that reuses the tail of the
 * pointer array as the AUX scratch area; needs a cast to compile, and
 * the VM_STATE byte layout implies 2-byte pointers (AVR-like target) --
 * confirm. */
void vm(uint8_t pc, uint8_t* state, uint8_t* key)
{
    uint8_t* vm_state[8] = { state, key }; // remaining slots zero-initialized
    vm_state[2] = &vm_state[4];            // aux ptr -> in-place scratch tail
    // TODO: set Rcon in state
    vm_internal(vm_state, pc);
}
/* Core bytecode interpreter: fetches 2-byte instructions until a zero
 * opcode terminates the loop.
 * Encoding: instr bits 7..5 = operation, bits 4..3 = source space,
 * bits 2..1 = destination space; param bits 7..4 = source index,
 * bits 3..0 = destination index.
 * NOTE(review): `pointers` is presumably the vm_state table -- confirm;
 * `code` is also reused as the handlers' opt argument, which looks like
 * it should come from an instruction bit instead -- verify. */
void vm_internal(uint8_t** vm_state, uint8_t pc)
{
    while (1) {
        uint8_t instr = program[pc++];
        if (instr == 0x00) break; // call with src != aux
        uint8_t param = program[pc++];
        uint8_t code = instr >> 5;
        uint8_t* src = pointers[(instr & 0x18) >> 3];
        uint8_t* dst = pointers[(instr & 0x06) >> 1];
        uint8_t srcIndex = (param & 0xF0) >> 4;
        uint8_t dstIndex = param & 0x0F; // FIX: was `param & 0xF0`, which kept the unshifted HIGH nibble already used by srcIndex
        instr_funcs[code](src, dst, code, srcIndex, dstIndex);
    }
}
The same approach for block modes:
/*
alternate notation
ECB_ENC:
COPY plain > cipher cipher = plain
AES cipher AES cipher
ECB_DEC:
COPY cipher > plain plain = cipher
INVAES plain INVAES plain
CBC_ENC:
XOR plain > iv iv ^= plain
AES iv AES iv
COPY iv > cipher cipher = iv
CFB_ENC:
AES iv AES iv
RESUME_POINT ---
XOR plain > iv iv ^= plain
COPY iv > cipher cipher = iv
CFB_DEC:
AES iv AES iv
temp = iv
---
iv = cipher
plain = temp
plain ^= iv
OFB_DEC:
OFB_ENC:
AES iv
RESUME_POINT
COPY iv > cipher
XOR plain > cipher
CTR_DEC:
CTR_ENC:
COPY iv > temp temp = iv
INC iv iv++
AES temp AES temp
RESUME_POINT ---
COPY temp > cipher cipher = temp
XOR plain > cipher cipher ^= plain
CTR_DEC_NO_RESUME:
CTR_ENC_NO_RESUME:
COPY iv > cipher
INC iv
AES cipher
XOR plain > cipher
XTS_ENC (for the future, must be surrounded by a higher-level function):
---
temp = plain
temp ^= iv
AES temp
temp ^= iv
cipher = temp
in higher level:
MULALPHA(iv)
switch ending with beginning of previous block
copy remaining bytes from 'temp'
XTS_DEC:
---
???
SPACES:
plain
cipher
iv
temp
OPERATIONS:
full:
AES
INVAES
INC
partial:
COPY - copy full or partial
XOR - xor full or partial
| 4 | 2 | 2 |
| instr | S | D |
*/
/* AES instruction for the block-mode VM: runs aes_encrypt on dst with
 * key, then aes_key_rewind on the key. src and length are unused (AES
 * always processes a whole block).
 * NOTE(review): presumably aes_encrypt advances the key schedule in
 * place and aes_key_rewind restores it for the next block -- confirm
 * against the AES core. */
void instr_aes(uint8_t* dst, uint8_t* src, uint8_t* key, uint8_t length)
{
    aes_encrypt(dst, key);
    aes_key_rewind(key);
}
/* INVAES instruction: runs aes_key_patch on the key, then aes_decrypt
 * on dst. src and length are unused.
 * NOTE(review): presumably aes_key_patch converts the stored key into
 * the schedule form aes_decrypt expects (mirror of instr_aes's rewind)
 * -- confirm against the AES core. */
void instr_invaes(uint8_t* dst, uint8_t* src, uint8_t* key, uint8_t length)
{
    aes_key_patch(key);
    aes_decrypt(dst, key);
}
/* INC instruction: big-endian increment of the 16-byte block at dst
 * (CTR-style counter). src, key and length are unused.
 * The carry propagates from the last byte toward the first and stops at
 * the first byte that does not wrap to zero.
 * FIX: the previous pointer-based loop decremented `end` below &dst[0]
 * when all 16 bytes wrapped, which is undefined behavior; this indexed
 * form (the variant previously parked under #if 0) is well-defined. */
void instr_inc(uint8_t* dst, uint8_t* src, uint8_t* key, uint8_t length)
{
    int8_t i;
    for (i = 15; i >= 0; i--)
        if (++dst[i] != 0)
            break;
}
/* COPY instruction: copies `length` bytes (full or partial block) from
 * src to dst. key is unused. Regions must not overlap (memcpy). */
void instr_copy(uint8_t* dst, uint8_t* src, uint8_t* key, uint8_t length)
{
    memcpy(dst, src, length);
}
/* XOR instruction: dst[i] ^= src[i] for the first `length` bytes (full
 * or partial block). key is unused; length 0 is a no-op. */
void instr_xor(uint8_t* dst, uint8_t* src, uint8_t* key, uint8_t length)
{
    uint8_t i;
    for (i = 0; i < length; i++)
    {
        dst[i] ^= src[i];
    }
}
/* Drives a block-mode bytecode program over `length` bytes of input,
 * processing at most 16 bytes per iteration so partially filled blocks
 * can be resumed across calls.
 * mode_address packs the program start in bits 5..0 and, in bits 7..6,
 * an offset added when resuming mid-block (skip to the RESUME_POINT).
 * temp[0] carries the intra-block position between iterations/calls.
 * NOTE(review): `temp[0] = 16 - part - start` stores the bytes REMAINING
 * in the block, while the loop top reads it back as the START offset;
 * these agree only when a block completes (both 0) -- verify the
 * intended resume semantics. temp[0] also lies inside the `temp` data
 * space handed to the VM -- confirm that overlap is deliberate. */
void block_mode(uint8_t mode_address, uint8_t* input, uint8_t* output, uint8_t* key, uint8_t* iv, uint8_t* temp, uint16_t length)
{
    uint8_t* spaces[4] = { input, output }; // [2] = iv slice, [3] = temp slice, set below
    while (length)
    {
        uint8_t start = temp[0];            // position already processed in current block
        uint8_t left = 16 - start;
        uint8_t part = (length < left) ? length : left;
        spaces[2] = &iv[start];
        spaces[3] = &temp[start];
        uint8_t pc = mode_address & 0x3F;
        if (start > 0) pc += mode_address >> 6; // resume past the RESUME_POINT
        block_vm(pc, spaces, key, part);
        length -= part;
        spaces[0] += part;
        spaces[1] += part;
        temp[0] = 16 - part - start;
    }
}
/* Executes block-mode bytecode starting at pc until an INSTR_RET opcode.
 * Each one-byte instruction packs the operation in bits 7..4 and two
 * space selectors in bits 3..2 and 1..0 (see the encoding table in the
 * comment above); the selected spaces and the key are handed to the
 * instr_* handler. */
void block_vm(uint8_t pc, uint8_t* spaces[4], uint8_t* key, uint8_t length)
{
    for (;;)
    {
        uint8_t op = prog[pc];
        pc++;
        if (op == INSTR_RET)
            break;
        uint8_t* first = spaces[(op >> 2) & 3];
        uint8_t* second = spaces[op & 3];
        funcs[op >> 4](first, second, key, length);
    }
}
// Maybe this will be smaller:
static inline void block_vm(uint8_t pc, uint8_t* spaces[4], uint8_t* key, uint8_t start, uint8_t length)
{
while (1)
{
uint8_t instr = prog[pc++];
if (instr == INSTR_RET) break;
funcs[instr >> 4](spaces[(instr >> 2) & 3] + start, spaces[instr & 3] + start, key, length);
}
}
/* Variant of block_mode for the offset-taking block_vm: all five spaces
 * (input, output, iv, temp, key) live in one table and the intra-block
 * offset is passed separately instead of pre-biasing the pointers.
 * NOTE(review): unlike the first variant, spaces[0]/spaces[1] are never
 * advanced by `part` after each iteration, and the same temp[0]
 * (remaining-vs-start) resume-offset concern applies -- verify. */
void block_mode(uint8_t mode_address, uint8_t* input, uint8_t* output, uint8_t* key, uint8_t* iv, uint8_t* temp, uint16_t length)
{
    uint8_t* spaces[5] = { input, output, iv, temp, key };
    while (length)
    {
        uint8_t start = temp[0];          // position already processed in current block
        uint8_t left = 16 - start;
        uint8_t part = (length < left) ? length : left;
        uint8_t pc = mode_address & 0x3F;
        if (start > 0) pc += mode_address >> 6; // resume past the RESUME_POINT
        block_vm(pc, spaces, key, start, part);
        length -= part;
        temp[0] = 16 - part - start;
    }
}
//OR
/* Combined instruction handler for the compact block-mode VM: op bits
 * 7..5 select the operation, dst/src are pre-resolved space pointers.
 * COPY is implemented as a masked XOR: case 0x60 clears the mask and
 * deliberately falls through into the XOR loop, where
 * (*dst & 0x00) ^ *src reduces to a plain copy. */
static inline void instr_all(uint8_t* dst, uint8_t* src, uint8_t length, uint8_t op)
{
    int8_t i;
    uint8_t mask = 0xFF;
    switch (op & 0xE0)
    {
    case 0x00: // AES\s+(iv|input|output|temp)
        aesCipher(dst, src);
        aesKeyRewind(src);
        break;
    case 0x20: // INVAES\s+(iv|input|output|temp)
        aesKeyPatch(src);
        aesInvCipher(dst, src);
        break;
    case 0x40: // (iv|input|output|temp)\s*++
        // Big-endian increment of the 16-byte block (CTR counter).
        for (i = 15; i >= 0; i--)
            if(++dst[i] != 0)
                break;
        break;
    case 0x60: // (iv|input|output|temp)\s*=\s*(iv|input|output|temp)
        mask = 0x00;
        /* intentional fall through: masked XOR degenerates to a copy */
    default: // (iv|input|output|temp)\s*\^=\s*(iv|input|output|temp)
        while (length--)
        {
            *dst = (*dst & mask) ^ *src;
            dst++;
            src++;
        }
        break;
    }
}
/* Interpreter for the combined-handler design: bits 1..0 of the opcode
 * select the destination space, bits 4..2 the source space, and the
 * whole byte is also forwarded so instr_all can decode the operation
 * from bits 7..5. Bytecode is fetched from program memory via
 * pgm_read_byte (AVR flash). */
static inline void block_vm2(uint8_t pc, uint8_t* spaces[5], uint8_t length)
{
    for (;;)
    {
        uint8_t op = pgm_read_byte((unsigned)prog + pc);
        pc++;
        if (op == INSTR_RET)
            break;
        instr_all(spaces[op & 3], spaces[(op >> 2) & 7], length, op);
    }
}