
add: rnn prep code
ltbringer committed Mar 28, 2019
1 parent bbcde21 commit 4026cfb
Showing 9 changed files with 26,786 additions and 2 deletions.
2 changes: 2 additions & 0 deletions .elasticbeanstalk/config.yml
@@ -0,0 +1,2 @@
global:
  profile: null
26,710 changes: 26,710 additions & 0 deletions fuzz.log

Large diffs are not rendered by default.

8 changes: 7 additions & 1 deletion fuzzer/__main__.py
@@ -9,7 +9,6 @@
# api_list = cli_args.api_json

# expected request format, as in the example below:
#
server_host = 'http://localhost'
server_port = 3000
api_list = [{
@@ -24,6 +23,13 @@
            'message': 'hello world'
        }
    }
}, {
    'url': '/string-match',
    'method': 'POST',
    'body': {
        'reference': '',
        'hypothesis': ''
    }
}]


Empty file added fuzzer/alg/__init__.py
Empty file added fuzzer/alg/nn/__init__.py
23 changes: 23 additions & 0 deletions fuzzer/alg/nn/preprocess.py
@@ -0,0 +1,23 @@
import string
import torch


all_letters = string.ascii_letters + " .,;'"  # 57-character vocabulary: letters plus basic punctuation
n_letters = len(all_letters)


def letter_to_index(letter):
    return all_letters.find(letter)  # index of the letter in the vocabulary, e.g. 'a' -> 0


def letter_to_tensor(letter):
    tensor = torch.zeros(1, n_letters)  # one-hot <1 x n_letters> row
    tensor[0][letter_to_index(letter)] = 1
    return tensor


def line_to_tensor(line):
    tensor = torch.zeros(len(line), 1, n_letters)  # <line_length x 1 x n_letters>
    for li, letter in enumerate(line):
        tensor[li][0][letter_to_index(letter)] = 1
    return tensor
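
For reference (not part of the commit), a minimal sketch of how these helpers encode text, assuming the package is importable as fuzzer.alg.nn.preprocess: each letter becomes a one-hot row over the 57-character vocabulary, and a line becomes a stack of such rows.

from fuzzer.alg.nn.preprocess import letter_to_tensor, line_to_tensor, n_letters

print(n_letters)                      # 57: a-z, A-Z and " .,;'"
print(letter_to_tensor('J').shape)    # torch.Size([1, 57])
print(line_to_tensor('Jones').shape)  # torch.Size([5, 1, 57]), one one-hot row per letter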
43 changes: 43 additions & 0 deletions fuzzer/alg/nn/rnn.py
@@ -0,0 +1,43 @@
import torch
import torch.nn as nn


class RNN(nn.Module):
    # https://pytorch.org/tutorials/intermediate/char_rnn_classification_tutorial.html
    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()

        self.hidden_size = hidden_size

        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(input_size + hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)
        self.learning_rate = 0.005
        self.criterion = nn.NLLLoss()

    def forward(self, input_, hidden):
        combined = torch.cat((input_, hidden), 1)
        hidden = self.i2h(combined)
        output = self.i2o(combined)
        output = self.softmax(output)
        return output, hidden

    def init_hidden(self):
        return torch.zeros(1, self.hidden_size)

    def train_network(self, category_tensor, line_tensor):
        hidden = self.init_hidden()

        self.zero_grad()
        output = None
        for i in range(line_tensor.size()[0]):
            output, hidden = self(line_tensor[i], hidden)

        loss = self.criterion(output, category_tensor)
        loss.backward()

        # Manual SGD step: add each parameter's gradient scaled by -learning_rate
        for p in self.parameters():
            p.data.add_(p.grad.data, alpha=-self.learning_rate)

        return output, loss.item()
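
Not part of the commit, and train.py is still empty, so the following is only a sketch of how the pieces could be wired together for a single training step; n_hidden and n_categories are hypothetical values (e.g. two labels such as match / no-match).

import torch
from fuzzer.alg.nn.preprocess import line_to_tensor, n_letters
from fuzzer.alg.nn.rnn import RNN

n_hidden = 128       # hidden size used in the linked PyTorch tutorial
n_categories = 2     # hypothetical number of output classes

rnn = RNN(n_letters, n_hidden, n_categories)
category_tensor = torch.tensor([1])  # index of the true category for this line
output, loss = rnn.train_network(category_tensor, line_to_tensor('hello world'))
print(output.shape, loss)            # torch.Size([1, 2]) and a float loss value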
Empty file added fuzzer/alg/nn/train.py
2 changes: 1 addition & 1 deletion fuzzer/primitive_fuzzer/fuzz.py
@@ -5,7 +5,7 @@ def random_integers(lower_limit=0, upper_limit=100):
    return random.randint(lower_limit, upper_limit)


def random_ascii_chars(max_char_len=1000):
def random_ascii_chars(max_char_len=20):
    char_len = random_integers(lower_limit=0, upper_limit=max_char_len)
    return ''.join([chr(random_integers(0, 128)) for n in range(char_len)])
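
The lower default keeps fuzz payloads to at most 20 characters. As a hypothetical usage sketch (the fuzzer's actual request loop is not shown in this diff), these strings could fill the new /string-match body from fuzzer/__main__.py:

import requests
from fuzzer.primitive_fuzzer.fuzz import random_ascii_chars

payload = {'reference': random_ascii_chars(), 'hypothesis': random_ascii_chars()}
requests.post('http://localhost:3000/string-match', json=payload)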

