Skip to content

Commit

Permalink
Merge pull request bitly#47 from ploxiln/simpleleveldb_47
Browse files Browse the repository at this point in the history
simpleleveldb list/set endpoints
  • Loading branch information
mreiferson committed May 8, 2012
2 parents d32a797 + 94b75ab commit 449795d
Show file tree
Hide file tree
Showing 13 changed files with 1,170 additions and 288 deletions.
1 change: 1 addition & 0 deletions .gitignore
Expand Up @@ -2,6 +2,7 @@
*.a
*.dSYM
*.pyc
*.deps
build
dist
sortdb/sortdb
Expand Down
3 changes: 3 additions & 0 deletions conftest.py
@@ -0,0 +1,3 @@
# needed for py.test to accept the --no-valgrind option
def pytest_addoption(parser):
    """Register the --no-valgrind flag so test runs can disable valgrind analysis."""
    parser.addoption("--no-valgrind", action="store_true", help="disable valgrind analysis")
7 changes: 2 additions & 5 deletions queuereader/queuereader.c
@@ -1,3 +1,4 @@
#define _GNU_SOURCE // for strndup()
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
Expand Down Expand Up @@ -28,7 +29,6 @@ void queuereader_increment_backoff();
void queuereader_decrement_backoff();

static char *message = NULL;
static size_t message_len;
static int backoff_counter = 0;
static struct GlobalData *data = NULL;
static struct event ev;
Expand Down Expand Up @@ -137,7 +137,6 @@ void queuereader_source_cb(struct evhttp_request *req, void *cbarg)
struct GlobalData *client_data = (struct GlobalData *)cbarg;
char *line;
size_t line_len;
char *tmp = NULL;
struct evbuffer *evb;
int ret = QR_EMPTY;

Expand All @@ -150,9 +149,7 @@ void queuereader_source_cb(struct evhttp_request *req, void *cbarg)
line = (char *)EVBUFFER_DATA(evb);
line_len = EVBUFFER_LENGTH(evb);
if (line_len) {
message = line;
message_len = line_len;
DUPE_N_TERMINATE(message, message_len, tmp);
message = strndup(line, line_len);
ret = (*client_data->message_cb)(message, client_data->cbarg);
}

Expand Down
56 changes: 26 additions & 30 deletions shared_tests/test_shunt.py
Expand Up @@ -34,26 +34,26 @@ def http_fetch(endpoint, params=None, response_code=200, body=None):
assert res.code == response_code
return res.body


def valgrind_cmd(test_output_dir, *options):
    """Build the command list for a test target, wrapped in valgrind/memcheck.

    test_output_dir -- directory where valgrind writes its log (vg.out)
    *options        -- the full command line to run (executable plus arguments)

    Returns a list suitable for subprocess.Popen. If --no-valgrind was passed
    on the test runner's command line, the command is returned unwrapped.
    """
    assert isinstance(options, (list, tuple))
    cmdlist = list(options)
    if '--no-valgrind' not in sys.argv:
        cmdlist = [
            'valgrind',
            '-v',
            '--tool=memcheck',
            # follow the target executable, not just the launching shell
            '--trace-children=yes',
            '--log-file=%s/vg.out' % test_output_dir,
            '--leak-check=full',
            #'--show-reachable=yes',
            '--run-libc-freeres=yes',
        ] + cmdlist
    return cmdlist

def check_valgrind_output(filename):
if '--no-valgrind' in sys.argv:
return

assert os.path.exists(filename)
time.sleep(.15)
vg_output = open(filename, 'r').readlines()
Expand All @@ -75,30 +75,26 @@ def check_valgrind_output(filename):
assert lost
assert lost[0] == "possibly lost: 0 bytes in 0 blocks"


class SubprocessTest(unittest.TestCase):
process_options = []
binary_name = ""
working_dir = None
def setUp(self):
"""setup method that starts up mongod instances using `self.mongo_options`"""
test_output_dir = None

@classmethod
def setUpClass(self):
"""setup method that starts up target instances using `self.process_options`"""
self.temp_dirs = []
self.processes = []
assert self.binary_name, "you must override self.binary_name"
assert self.working_dir, "set workign dir to os.path.dirname(__file__)"

exe = os.path.join(self.working_dir, self.binary_name)
if os.path.exists(exe):
logging.info('removing old %s' % exe)
os.unlink(exe)

# make should update the executable if needed
logging.info('running make')
pipe = subprocess.Popen(['make'])
pipe.wait()

assert os.path.exists(exe), "compile failed"
pipe = subprocess.Popen(['make', '-C', self.working_dir])
assert pipe.wait() == 0, "compile failed"

test_output_dir = os.path.join(self.working_dir, "test_output")
test_output_dir = self.test_output_dir
if os.path.exists(test_output_dir):
logging.info('removing %s' % test_output_dir)
pipe = subprocess.Popen(['rm', '-rf', test_output_dir])
Expand All @@ -108,16 +104,14 @@ def setUp(self):
os.makedirs(test_output_dir)

for options in self.process_options:

logging.info(' '.join(options))
# self.stdout = open(test_output_dir + '/test.out', 'w')
# self.stderr = open(test_output_dir + '/test.err', 'w')
pipe = subprocess.Popen(options)#, stdout=self.stdout, stderr=self.stderr)
pipe = subprocess.Popen(options)
self.processes.append(pipe)
logging.debug('started process %s' % pipe.pid)

self.wait_for('http://127.0.0.1:8080/', max_time=5)
self.wait_for('http://127.0.0.1:8080/', max_time=9)

@classmethod
def wait_for(self, url, max_time):
# check up to 15 times till the endpoint specified is available waiting max_time
step = max_time / float(15)
Expand All @@ -131,15 +125,17 @@ def wait_for(self, url, max_time):
pass
time.sleep(step)

@classmethod
def graceful_shutdown(self):
    """Ask the target process to exit via its /exit endpoint.

    The server never replies to a successful /exit request, so success
    surfaces as a fetch error; swallow it and give the process a moment
    to terminate.
    """
    try:
        http_fetch('/exit', dict())
    # narrowed from bare `except:` so KeyboardInterrupt/SystemExit still propagate
    except Exception:
        # we never get a reply if this works correctly
        time.sleep(1)

def tearDown(self):
"""teardown method that cleans up child mongod instances, and removes their temporary data files"""
@classmethod
def tearDownClass(self):
"""teardown method that cleans up child target instances, and removes their temporary data files"""
logging.debug('teardown')
try:
self.graceful_shutdown()
Expand Down
7 changes: 0 additions & 7 deletions simplehttp/simplehttp.h
Expand Up @@ -7,13 +7,6 @@
#include <evhttp.h>

#define SIMPLEHTTP_VERSION "0.1.3"
#ifndef DUPE_N_TERMINATE
#define DUPE_N_TERMINATE(buf, len, tmp) \
tmp = malloc((len) + 1); \
memcpy(tmp, buf, (len)); \
tmp[(len)] = '\0'; \
buf = tmp;
#endif

#if _POSIX_TIMERS > 0

Expand Down
6 changes: 3 additions & 3 deletions simplehttp/util.c
@@ -1,3 +1,4 @@
#define _GNU_SOURCE // for strndup()
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
Expand Down Expand Up @@ -58,7 +59,6 @@ uint64_t ninety_five_percent(int64_t *int_array, int length)
int simplehttp_parse_url(char *endpoint, size_t endpoint_len, char **address, int *port, char **path)
{
// parse out address, port, path
char *tmp = NULL;
char *tmp_port = NULL;
char *tmp_pointer;
size_t address_len;
Expand Down Expand Up @@ -98,8 +98,8 @@ int simplehttp_parse_url(char *endpoint, size_t endpoint_len, char **address, in
}

path_len = (endpoint + endpoint_len) - *path;
DUPE_N_TERMINATE(*address, address_len, tmp);
DUPE_N_TERMINATE(*path, path_len, tmp);
*address = strndup(*address, address_len);
*path = strndup(*path, path_len);

return 1;
}
Expand Down
19 changes: 11 additions & 8 deletions simpleleveldb/Makefile
Expand Up @@ -11,16 +11,19 @@ AR = ar
AR_FLAGS = rc
RANLIB = ranlib

all: simpleleveldb leveldb_to_csv csv_to_leveldb
TARGETS = simpleleveldb leveldb_to_csv csv_to_leveldb

leveldb_to_csv: leveldb_to_csv.c
$(CC) $(CFLAGS) -o $@ leveldb_to_csv.c $(LIBS)
SOURCES_simpleleveldb = simpleleveldb.c str_list_set.c
SOURCES_leveldb_to_csv = leveldb_to_csv.c
SOURCES_csv_to_leveldb = csv_to_leveldb.c

csv_to_leveldb: csv_to_leveldb.c
$(CC) $(CFLAGS) -o $@ csv_to_leveldb.c $(LIBS)
all: $(TARGETS)

simpleleveldb: simpleleveldb.c
$(CC) $(CFLAGS) -o $@ simpleleveldb.c $(LIBS)
-include $(TARGETS:%=%.deps)

$(TARGETS): %: %.c
$(CC) $(CFLAGS) -MM -MT $@ -MF $@.deps $(SOURCES_$@)
$(CC) $(CFLAGS) -o $@ $(SOURCES_$@) $(LIBS)

install:
/usr/bin/install -d $(TARGET)/bin/
Expand All @@ -29,4 +32,4 @@ install:
/usr/bin/install csv_to_leveldb $(TARGET)/bin/

clean:
rm -rf *.a *.o simpleleveldb leveldb_to_csv csv_to_leveldb *.dSYM
rm -rf *.a *.o *.deps *.dSYM $(TARGETS)
82 changes: 69 additions & 13 deletions simpleleveldb/README.md
Expand Up @@ -51,39 +51,95 @@ OPTIONS
API endpoints:

* /get
parameters: `key`, `format`

parameters: `key`

* /mget

parameters: `key` (multiple), `format`
parameters: `key` (multiple)

* /fwmatch

parameters: `key`, `limit`
parameters: `key`, `limit` (default 500)

* /range_match

parameters: `start`, `end`, `limit`
parameters: `start`, `end`, `limit` (default 500)

* /put

parameters: `key`, `value`, `format`
parameters: `key`, `value`

Note: `value` can also be specified as the raw POST body content

* /mput

takes CSV values in the body of the request.

Note: takes separator-delimited key/value pairs, one pair per line, in the POST body

* /list_append

parameters: `key`, `value` (multiple)

* /list_prepend

parameters: `key`, `value` (multiple)

* /list_remove

parameters: `key`, `value` (multiple)

* /list_pop

parameters: `key`, `position` (default 0), `count` (default 1)

Note: a negative position does a reverse count from the end of the list

* /set_add

parameters: `key`, `value` (multiple)

* /set_remove

parameters: `key`, `value` (multiple)

* /set_pop

parameters: `key`, `count` (default 1)

* /dump_csv

parameters: `key` (optional)

Note: dumps the entire database in csv (txt format), starting at `key` if provided, otherwise from the beginning

* /del

parameters: `key`, `format`
parameters: `key`

* /stats

* /exit (cause the current process to exit)

* /exit

Note: causes the process to exit

All endpoints take a `format` parameter which affects whether error conditions
are represented by the HTTP response code (format=txt) or by the "status_code"
member of the json result (format=json) (in which case the HTTP response code
is always 200 if the server isn't broken). `format` also affects the output
data for all endpoints except /put, /mput, /exit, /del, and /dump_csv.

Output data in json format is under the "data" member of the root json object,
sometimes as a string (/get), sometimes as an array (/mget), sometimes as an
object with some metadata (/list_remove).

Most endpoints take a `separator` parameter which defaults to "," (but can be
set to any single character), which affects txt format output data. It also
affects the deserialization and serialization of lists and sets stored in the
db, and the input parsing of /mput.

All list and set endpoints take a `return_data` parameter; set it to 1 to additionally
return the new value of the list or set. However, this doesn't work for list_pop
or set_pop endpoints in txt format.

Utilities
---------
Expand Down

0 comments on commit 449795d

Please sign in to comment.