Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP

Comparing changes

Choose two branches to see what's changed or to start a new pull request. If you need to, you can also compare across forks.

Open a pull request

Create a new pull request by comparing changes across two branches. If you need to, you can also compare across forks.
base fork: Fleurer/Virtue
base: 9aac7d49cb
...
head fork: Fleurer/Virtue
compare: e317b12ea8
Checking mergeability… Don't worry, you can still create the pull request.
  • 15 commits
  • 22 files changed
  • 0 commit comments
  • 1 contributor
View
2  Rakefile
@@ -1,3 +1,5 @@
+$: << File.dirname(__FILE__)
+
require "tool/compile.rb"
require "tool/test.rb"
View
18 src/buf.h
@@ -0,0 +1,18 @@
#ifndef BUF_H
#define BUF_H

#include <stddef.h>

/* Fixed capacity of a single buffer chunk, in bytes. */
#define VT_BUFSIZ 4096

/* One chunk of buffered data: a fixed-size byte array, the number of
 * bytes actually used, and a link to the next chunk in the chain. */
typedef struct vt_buf_entry {
    char buf[VT_BUFSIZ];
    size_t size;
    struct vt_buf_entry *next;
} vt_buf_entry_t;

/* Buffer handle: embeds the first chunk directly, so an empty buffer
 * needs no separate chunk allocation. */
typedef struct vt_buf {
    struct vt_buf_entry entry;
} vt_buf_t;

#endif
View
130 src/buffer.cpp
@@ -1,130 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <errno.h>
-#include <string.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/select.h>
-#include <netinet/in.h>
-//
-#include "vt/vt.h"
-
-vbuf_head_t* vbuf_head_new(int type){
- vbuf_head_t *bhp;
-
- bhp = (vbuf_head_t*)malloc(sizeof(vbuf_head_t));
- bhp->bh_type = type;
- bhp->bh_bufs = new list<vbuf_t*>();
- return bhp;
-}
-
-int vbuf_head_free(vbuf_head_t *bhp){
- vbuf_t *bp;
- list<vbuf_t*> *lp;
- list<vbuf_t*>::iterator pos;
-
- lp = bhp->bh_bufs;
- for (pos=lp->begin(); pos!=lp->end(); ++pos) {
- bp = *pos;
- vbuf_free(bp);
- }
- delete bhp->bh_bufs;
- return 0;
-}
-
-/* ------------------------------------ */
-
-/* returns 0 on finished. */
-int vbuf_read(int fd, vbuf_head_t *bhp){
- vbuf_t *bp;
- int r;
-
- bp = vbuf_append(bhp);
- r = recv(fd, bp->b_data, BUF_SIZE, 0);
- if (r < 0)
- return r;
- if (r == 0) {
- vbuf_erase_last(bhp);
- }
- return r;
-}
-
-/* pop the first buffer of the output buffer queue,
- * and write it out.
- * returns -1 on error, or 0 on finished. */
-int vbuf_write(int fd, vbuf_head_t *bhp){
- vbuf_t *bp;
- int r;
-
- bp = bhp->bh_bufs->front();
- r = send(fd, bp->b_data, BUF_SIZE, 0);
- if (r < 0) {
- }
- if (r == 0) {
- }
- vbuf_erase_first(bhp);
- return r;
-}
-
-/* ------------------------------------ */
-
-/* */
-vbuf_t* vbuf_append(vbuf_head_t *bhp) {
- list<vbuf_t*> *lp;
- vbuf_t *bp;
-
- bp = vbuf_alloc();
- lp = bhp->bh_bufs;
- lp->push_back(bp);
- return bp;
-}
-
-/* called on the end */
-int vbuf_erase_last(vbuf_head_t *bhp){
- list<vbuf_t*> *lp;
- vbuf_t *bp;
-
- lp = bhp->bh_bufs;
- bp = lp->back();
- if (bp != 0) {
- vbuf_free(bp);
- return 1;
- }
- lp->pop_back();
- return 0;
-}
-
-/* called on each write action */
-int vbuf_erase_first(vbuf_head_t *bhp){
- list<vbuf_t*> *lp;
- vbuf_t *bp;
-
- lp = bhp->bh_bufs;
- bp = lp->front();
- if (bp != 0) {
- vbuf_free(bp);
- return 1;
- }
- lp->pop_front();
- return 0;
-}
-
-/* ----------------------------------------- */
-
-/* TODO: add a freelist here, and check the return value of malloc.
- * */
-vbuf_t* vbuf_alloc(){
- vbuf_t *bp;
-
- bp = (vbuf_t*)malloc(sizeof(vbuf_t));
- bp->b_size = 0;
- bp->b_data = (char*)malloc(BUF_SIZE);
- return bp;
-}
-
-int vbuf_free(vbuf_t* bp){
- free(bp->b_data);
- free(bp);
- return 0;
-}
View
45 src/conn.c
@@ -0,0 +1,45 @@
+#include "vt.h"
+#include "event.h"
+#include "pool.h"
+#include "conn.h"
+
+static void vt_conn_on_readable(vt_event_t *ev);
+
+/* ----------------------------------- */
+
+vt_conn_t* vt_accept(int sockfd) {
+ int fd;
+ vt_conn_t *conn;
+ socklen_t socklen;
+
+ conn = (vt_conn_t*)vt_malloc(sizeof(vt_conn_t));
+ socklen = sizeof(conn->sockaddr);
+ fd = accept(sockfd, (struct sockaddr*)&conn->sockaddr, &socklen);
+ if (fd < 0) {
+ vt_free(conn);
+ return NULL;
+ }
+ //
+ conn->fd = fd;
+ conn->pool = vt_pool_create();
+ vt_event_init(&conn->event, conn->fd, EV_READ | EV_WRITE, conn);
+ vt_event_bind(&conn->event, EV_READ, (vt_event_cb_t)&vt_conn_on_readable);
+ return conn;
+}
+
+void vt_conn_destroy(vt_conn_t *conn) {
+ close(conn->fd);
+ vt_pool_destroy(conn->pool);
+ vt_free(conn);
+}
+
+/* ------------------------------- */
+
+static void vt_conn_on_readable(vt_event_t *ev) {
+ char buf[1024];
+ int n;
+
+ n = read(ev->fd, buf, 1024);
+ buf[n] = '\0';
+ printf("%s\n", buf);
+}
View
20 src/conn.cpp
@@ -1,20 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <errno.h>
-#include <string.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/select.h>
-#include <netinet/in.h>
-//
-#include "vt/vt.h"
-
-vconn_t* vconn_new(int fd) {
- vconn_t *cp = (vconn_t*)malloc(sizeof(vconn_t));
- cp->c_fd = fd;
- cp->c_stat = 0;
- cp->c_buf_read = vbuf_head_new(BUF_READ);
- cp->c_buf_write = vbuf_head_new(BUF_WRITE);
- return cp;
-}
View
16 src/conn.h
@@ -0,0 +1,16 @@
+#ifndef CONN_H
+#define CONN_H
+
+struct vt_pool;
+
+typedef struct vt_conn {
+ int fd;
+ struct vt_event event;
+ struct sockaddr_in sockaddr;
+ struct vt_pool *pool;
+} vt_conn_t;
+
+vt_conn_t* vt_accept(int sockfd);
+void vt_conn_destroy(vt_conn_t *conn);
+
+#endif
View
93 src/event.c
@@ -0,0 +1,93 @@
+#include "vt.h"
+#include "event.h"
+#include "buf.h"
+#include "conn.h"
+
+int vt_cycle_init(vt_cycle_t *cl) {
+ TAILQ_INIT(&cl->io_event_entries);
+ cl->max_fd = 0;
+ FD_ZERO(&cl->read_fds);
+ FD_ZERO(&cl->write_fds);
+ FD_ZERO(&cl->except_fds);
+ return 0;
+}
+
+int vt_event_process(vt_cycle_t *cl) {
+ int r;
+ vt_event_t *ev;
+ vt_event_cb_t cb;
+
+ while ((r = select(cl->max_fd + 1, &cl->read_fds, &cl->write_fds, NULL, NULL))) {
+ vt_log("new cycle\n");
+ if (r < 0) {
+ vt_log("select() failed: %d, %s\n", errno, strerror(errno));
+ continue;
+ }
+ TAILQ_FOREACH(ev, &cl->io_event_entries, entry) {
+ // in read
+ if (FD_ISSET(ev->fd, &cl->read_fds)) {
+ cb = ev->on_readable;
+ if (cb)
+ cb(ev);
+ }
+ // in write
+ if (FD_ISSET(ev->fd, &cl->write_fds)) {
+ cb = ev->on_readable;
+ if (cb)
+ cb(ev);
+ }
+ }
+ }
+ return 0;
+}
+
+/* ------------------ */
+
+int vt_event_init(vt_event_t *ev, int fd, int flag, vt_conn_t *conn) {
+ ev->flag = flag;
+ ev->fd = fd;
+ ev->on_readable = NULL;
+ ev->on_writeable = NULL;
+ ev->on_error = NULL;
+ ev->cycle = NULL;
+ ev->conn = conn;
+ return 0;
+}
+
+int vt_event_bind(vt_event_t *ev, int flag, vt_event_cb_t cb) {
+ if (flag & EV_READ) {
+ ev->on_readable = cb;
+ }
+ if (flag & EV_WRITE) {
+ ev->on_writeable = cb;
+ }
+ return 0;
+}
+
+int vt_event_add(vt_cycle_t *cl, vt_event_t *ev) {
+ assert(cl != NULL);
+ if (ev->flag & EV_READ) {
+ FD_SET(ev->fd, &cl->read_fds);
+ }
+ if (ev->flag & EV_WRITE) {
+ FD_SET(ev->fd, &cl->write_fds);
+ }
+ if (ev->fd > cl->max_fd) {
+ cl->max_fd = ev->fd;
+ }
+ ev->cycle = cl;
+ TAILQ_INSERT_TAIL(&cl->io_event_entries, ev, entry);
+ vt_log("added event, fd: %d, max_fd: %d\n", ev->fd, cl->max_fd);
+ return 0;
+}
+
+int vt_event_remove(vt_cycle_t *cl, vt_event_t *ev) {
+ if (ev->flag & EV_READ) {
+ FD_CLR(ev->fd, &cl->read_fds);
+ }
+ if (ev->flag & EV_WRITE) {
+ FD_CLR(ev->fd, &cl->write_fds);
+ }
+ TAILQ_REMOVE(&cl->io_event_entries, ev, entry);
+ return 0;
+}
View
55 src/event.h
@@ -0,0 +1,55 @@
+#ifndef EVENT_H
+#define EVENT_H
+
+#include <stddef.h>
+#include <sys/select.h>
+#include "queue.h"
+
+struct vt_cycle;
+struct vt_event;
+struct vt_buf;
+
+typedef void (*vt_event_cb_t)(struct vt_event *);
+
+typedef struct vt_cycle {
+ TAILQ_HEAD(, vt_event) io_event_entries;
+ fd_set read_fds;
+ fd_set write_fds;
+ fd_set except_fds;
+ size_t max_fd;
+} vt_cycle_t;
+
+
+/* note that vt_event do NOT own any resource, so
+ * it doesn't require any destruction. */
+typedef struct vt_event {
+ /* public */
+ int fd;
+ int flag;
+ vt_event_cb_t on_readable;
+ vt_event_cb_t on_writeable;
+ vt_event_cb_t on_error;
+ /* private */
+ TAILQ_ENTRY(vt_event) entry;
+ struct vt_buf *buf;
+ struct vt_conn *conn;
+ struct vt_cycle *cycle;
+} vt_event_t;
+
+typedef struct vt_timer {
+ /* TODO */
+} vt_timer_t;
+
+enum {
+ EV_READ = 0x1,
+ EV_WRITE = 0x10,
+};
+
+int vt_cycle_init(vt_cycle_t *cl);
+int vt_event_process(vt_cycle_t *cl);
+int vt_event_init(vt_event_t *ev, int fd, int flag, struct vt_conn *conn);
+int vt_event_bind(vt_event_t *ev, int flag, vt_event_cb_t cb);
+int vt_event_add(vt_cycle_t *elp, vt_event_t *ep);
+int vt_event_remove(vt_cycle_t *elp, vt_event_t *ep);
+
+#endif
View
41 src/main.c
@@ -0,0 +1,41 @@
+#include "vt.h"
+#include "event.h"
+#include "sock.h"
+#include "conn.h"
+
+#define PORT 4000
+#define LISTEN_SIZ 10
+
+int vt_sockfd_on_readable(vt_event_t *ev) {
+ vt_conn_t *conn;
+
+ conn = vt_accept(ev->fd);
+ if (conn == NULL) {
+ vt_log("new connection failed\n");
+ }
+ vt_event_add(ev->cycle, &conn->event);
+ vt_log("new connection established, fd: %d\n", conn->fd);
+ return 0;
+}
+
+int vt_main() {
+ vt_cycle_t cl;
+ vt_event_t ev;
+ int sockfd;
+
+ vt_cycle_init(&cl);
+ sockfd = vt_sockfd_new();
+ if (sockfd < 0) {
+ return -1;
+ }
+ vt_event_init(&ev, sockfd, EV_READ, NULL);
+ vt_event_bind(&ev, EV_READ, (vt_event_cb_t)&vt_sockfd_on_readable);
+ vt_event_add(&cl, &ev);
+ vt_event_process(&cl);
+ return 0;
+}
+
/* Process entry point: delegate to vt_main and propagate failure to
 * the exit status (was: always returned 0 even when startup failed). */
int main(int argc, char *argv[]) {
    (void)argc;   /* no command-line options yet */
    (void)argv;
    return vt_main() < 0 ? 1 : 0;
}
View
51 src/pool.c
@@ -0,0 +1,51 @@
+#include <stdlib.h>
+#include "pool.h"
+#include "vt.h"
+
+vt_pool_t* vt_pool_create() {
+ vt_pool_t *pl;
+
+ pl = (vt_pool_t*)vt_malloc(sizeof(vt_pool_t));
+ STAILQ_INIT(&pl->entries);
+ return pl;
+}
+
+void vt_pool_destroy(vt_pool_t *pl) {
+ vt_pool_entry_t *pe, *npe;
+ size_t nobj = 0;
+
+ for (pe = STAILQ_FIRST(&pl->entries); pe; pe = npe, nobj++) {
+ npe = STAILQ_NEXT(pe, entry);
+ free(pe->mem);
+ free(pe);
+ }
+ vt_log("free pool: %p, %d objects freed.\n", pl, nobj);
+}
+
+void* vt_palloc(vt_pool_t *pl, size_t size) {
+ vt_pool_entry_t *pe;
+
+ pe = (vt_pool_entry_t*)vt_malloc(sizeof(vt_pool_entry_t));
+ pe->mem = vt_malloc(size);
+ pe->size = size;
+ STAILQ_INSERT_TAIL(&pl->entries, pe, entry);
+ return pe->mem;
+}
+
+/* ----------------------------- */
+
/* malloc wrapper: logs on exhaustion but still returns NULL, so
 * callers must check the result themselves. */
void *vt_malloc(size_t size) {
    void *mem = malloc(size);

    if (!mem) {
        vt_log("out of memory");
    }
    return mem;
}
+
/* free() counterpart of vt_malloc. free(NULL) is a harmless no-op,
 * so no guard is needed. */
void vt_free(void *mem) {
    free(mem);
}
+
+
View
31 src/pool.h
@@ -0,0 +1,31 @@
+#ifndef POOL_H
+#define POOL_H
+
+#include <stddef.h>
+#include "queue.h"
+
+/* A quick and dirty memory pool implementation, just for making
+ * memory deallocating easier yet. It should be replaced with a
+ * chunk allocating mechanism later.
+ * */
+
+struct vt_pool_entry;
+
+typedef struct vt_pool {
+ STAILQ_HEAD(, vt_pool_entry) entries;
+} vt_pool_t;
+
+typedef struct vt_pool_entry {
+ size_t size;
+ void *mem;
+ STAILQ_ENTRY(vt_pool_entry) entry;
+} vt_pool_entry_t;
+
+vt_pool_t* vt_pool_create();
+void vt_pool_destroy(vt_pool_t *pl);
+void* vt_palloc(vt_pool_t *pl, size_t size);
+
+void *vt_malloc(size_t size);
+void vt_free(void *mem);
+
+#endif
View
559 src/queue.h
@@ -0,0 +1,559 @@
+/*
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)queue.h 8.5 (Berkeley) 8/20/94
+ * $FreeBSD: src/sys/sys/queue.h,v 1.32.2.6 2001/12/18 10:09:02 ru Exp $
+ */
+
+#ifndef _SYS_QUEUE_H_
+#define _SYS_QUEUE_H_
+
+#define __offsetof(type, field) ((size_t)(&((type *)0)->field))
+
+/*
+ * This file defines five types of data structures: singly-linked lists,
+ * singly-linked tail queues, lists, tail queues, and circular queues.
+ *
+ * A singly-linked list is headed by a single forward pointer. The elements
+ * are singly linked for minimum space and pointer manipulation overhead at
+ * the expense of O(n) removal for arbitrary elements. New elements can be
+ * added to the list after an existing element or at the head of the list.
+ * Elements being removed from the head of the list should use the explicit
+ * macro for this purpose for optimum efficiency. A singly-linked list may
+ * only be traversed in the forward direction. Singly-linked lists are ideal
+ * for applications with large datasets and few or no removals or for
+ * implementing a LIFO queue.
+ *
+ * A singly-linked tail queue is headed by a pair of pointers, one to the
+ * head of the list and the other to the tail of the list. The elements are
+ * singly linked for minimum space and pointer manipulation overhead at the
+ * expense of O(n) removal for arbitrary elements. New elements can be added
+ * to the list after an existing element, at the head of the list, or at the
+ * end of the list. Elements being removed from the head of the tail queue
+ * should use the explicit macro for this purpose for optimum efficiency.
+ * A singly-linked tail queue may only be traversed in the forward direction.
+ * Singly-linked tail queues are ideal for applications with large datasets
+ * and few or no removals or for implementing a FIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * A circle queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the list.
+ * A circle queue may be traversed in either direction, but has a more
+ * complex end of list detection.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ *
+ *
+ * SLIST LIST STAILQ TAILQ CIRCLEQ
+ * _HEAD + + + + +
+ * _HEAD_INITIALIZER + + + + +
+ * _ENTRY + + + + +
+ * _INIT + + + + +
+ * _EMPTY + + + + +
+ * _FIRST + + + + +
+ * _NEXT + + + + +
+ * _PREV - - - + +
+ * _LAST - - + + +
+ * _FOREACH + + + + +
+ * _FOREACH_REVERSE - - - + +
+ * _INSERT_HEAD + + + + +
+ * _INSERT_BEFORE - + - + +
+ * _INSERT_AFTER + + + + +
+ * _INSERT_TAIL - - + + +
+ * _REMOVE_HEAD + - + - -
+ * _REMOVE + + + + +
+ *
+ */
+
+/*
+ * Singly-linked List declarations.
+ */
+#define SLIST_HEAD(name, type) \
+struct name { \
+ struct type *slh_first; /* first element */ \
+}
+
+#define SLIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define SLIST_ENTRY(type) \
+struct { \
+ struct type *sle_next; /* next element */ \
+}
+
+/*
+ * Singly-linked List functions.
+ */
+#define SLIST_EMPTY(head) ((head)->slh_first == NULL)
+
+#define SLIST_FIRST(head) ((head)->slh_first)
+
+#define SLIST_FOREACH(var, head, field) \
+ for ((var) = SLIST_FIRST((head)); \
+ (var); \
+ (var) = SLIST_NEXT((var), field))
+
+#define SLIST_INIT(head) do { \
+ SLIST_FIRST((head)) = NULL; \
+} while (0)
+
+#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
+ SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \
+ SLIST_NEXT((slistelm), field) = (elm); \
+} while (0)
+
+#define SLIST_INSERT_HEAD(head, elm, field) do { \
+ SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \
+ SLIST_FIRST((head)) = (elm); \
+} while (0)
+
+#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
+
+#define SLIST_REMOVE(head, elm, type, field) do { \
+ if (SLIST_FIRST((head)) == (elm)) { \
+ SLIST_REMOVE_HEAD((head), field); \
+ } \
+ else { \
+ struct type *curelm = SLIST_FIRST((head)); \
+ while (SLIST_NEXT(curelm, field) != (elm)) \
+ curelm = SLIST_NEXT(curelm, field); \
+ SLIST_NEXT(curelm, field) = \
+ SLIST_NEXT(SLIST_NEXT(curelm, field), field); \
+ } \
+} while (0)
+
+#define SLIST_REMOVE_HEAD(head, field) do { \
+ SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \
+} while (0)
+
+/*
+ * Singly-linked Tail queue declarations.
+ */
+#define STAILQ_HEAD(name, type) \
+struct name { \
+ struct type *stqh_first;/* first element */ \
+ struct type **stqh_last;/* addr of last next element */ \
+}
+
+#define STAILQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).stqh_first }
+
+#define STAILQ_ENTRY(type) \
+struct { \
+ struct type *stqe_next; /* next element */ \
+}
+
+/*
+ * Singly-linked Tail queue functions.
+ */
+#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL)
+
+#define STAILQ_FIRST(head) ((head)->stqh_first)
+
+#define STAILQ_FOREACH(var, head, field) \
+ for((var) = STAILQ_FIRST((head)); \
+ (var); \
+ (var) = STAILQ_NEXT((var), field))
+
+#define STAILQ_INIT(head) do { \
+ STAILQ_FIRST((head)) = NULL; \
+ (head)->stqh_last = &STAILQ_FIRST((head)); \
+} while (0)
+
+#define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \
+ if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\
+ (head)->stqh_last = &STAILQ_NEXT((elm), field); \
+ STAILQ_NEXT((tqelm), field) = (elm); \
+} while (0)
+
+#define STAILQ_INSERT_HEAD(head, elm, field) do { \
+ if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \
+ (head)->stqh_last = &STAILQ_NEXT((elm), field); \
+ STAILQ_FIRST((head)) = (elm); \
+} while (0)
+
+#define STAILQ_INSERT_TAIL(head, elm, field) do { \
+ STAILQ_NEXT((elm), field) = NULL; \
+ *(head)->stqh_last = (elm); \
+ (head)->stqh_last = &STAILQ_NEXT((elm), field); \
+} while (0)
+
+#define STAILQ_LAST(head, type, field) \
+ (STAILQ_EMPTY(head) ? \
+ NULL : \
+ ((struct type *) \
+ ((char *)((head)->stqh_last) - __offsetof(struct type, field))))
+
+#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
+
+#define STAILQ_REMOVE(head, elm, type, field) do { \
+ if (STAILQ_FIRST((head)) == (elm)) { \
+ STAILQ_REMOVE_HEAD(head, field); \
+ } \
+ else { \
+ struct type *curelm = STAILQ_FIRST((head)); \
+ while (STAILQ_NEXT(curelm, field) != (elm)) \
+ curelm = STAILQ_NEXT(curelm, field); \
+ if ((STAILQ_NEXT(curelm, field) = \
+ STAILQ_NEXT(STAILQ_NEXT(curelm, field), field)) == NULL)\
+ (head)->stqh_last = &STAILQ_NEXT((curelm), field);\
+ } \
+} while (0)
+
+#define STAILQ_REMOVE_HEAD(head, field) do { \
+ if ((STAILQ_FIRST((head)) = \
+ STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \
+ (head)->stqh_last = &STAILQ_FIRST((head)); \
+} while (0)
+
+#define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do { \
+ if ((STAILQ_FIRST((head)) = STAILQ_NEXT((elm), field)) == NULL) \
+ (head)->stqh_last = &STAILQ_FIRST((head)); \
+} while (0)
+
+/*
+ * List declarations.
+ */
+#define LIST_HEAD(name, type) \
+struct name { \
+ struct type *lh_first; /* first element */ \
+}
+
+#define LIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define LIST_ENTRY(type) \
+struct { \
+ struct type *le_next; /* next element */ \
+ struct type **le_prev; /* address of previous next element */ \
+}
+
+/*
+ * List functions.
+ */
+
+#define LIST_EMPTY(head) ((head)->lh_first == NULL)
+
+#define LIST_FIRST(head) ((head)->lh_first)
+
+#define LIST_FOREACH(var, head, field) \
+ for ((var) = LIST_FIRST((head)); \
+ (var); \
+ (var) = LIST_NEXT((var), field))
+
+#define LIST_INIT(head) do { \
+ LIST_FIRST((head)) = NULL; \
+} while (0)
+
+#define LIST_INSERT_AFTER(listelm, elm, field) do { \
+ if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\
+ LIST_NEXT((listelm), field)->field.le_prev = \
+ &LIST_NEXT((elm), field); \
+ LIST_NEXT((listelm), field) = (elm); \
+ (elm)->field.le_prev = &LIST_NEXT((listelm), field); \
+} while (0)
+
+#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.le_prev = (listelm)->field.le_prev; \
+ LIST_NEXT((elm), field) = (listelm); \
+ *(listelm)->field.le_prev = (elm); \
+ (listelm)->field.le_prev = &LIST_NEXT((elm), field); \
+} while (0)
+
+#define LIST_INSERT_HEAD(head, elm, field) do { \
+ if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \
+ LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\
+ LIST_FIRST((head)) = (elm); \
+ (elm)->field.le_prev = &LIST_FIRST((head)); \
+} while (0)
+
+#define LIST_NEXT(elm, field) ((elm)->field.le_next)
+
+#define LIST_REMOVE(elm, field) do { \
+ if (LIST_NEXT((elm), field) != NULL) \
+ LIST_NEXT((elm), field)->field.le_prev = \
+ (elm)->field.le_prev; \
+ *(elm)->field.le_prev = LIST_NEXT((elm), field); \
+} while (0)
+
+/*
+ * Tail queue declarations.
+ */
+#define TAILQ_HEAD(name, type) \
+struct name { \
+ struct type *tqh_first; /* first element */ \
+ struct type **tqh_last; /* addr of last next element */ \
+}
+
+#define TAILQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).tqh_first }
+
+#define TAILQ_ENTRY(type) \
+struct { \
+ struct type *tqe_next; /* next element */ \
+ struct type **tqe_prev; /* address of previous next element */ \
+}
+
+/*
+ * Tail queue functions.
+ */
+#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
+
+#define TAILQ_FIRST(head) ((head)->tqh_first)
+
+#define TAILQ_FOREACH(var, head, field) \
+ for ((var) = TAILQ_FIRST((head)); \
+ (var); \
+ (var) = TAILQ_NEXT((var), field))
+
+#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
+ for ((var) = TAILQ_LAST((head), headname); \
+ (var); \
+ (var) = TAILQ_PREV((var), headname, field))
+
+#define TAILQ_INIT(head) do { \
+ TAILQ_FIRST((head)) = NULL; \
+ (head)->tqh_last = &TAILQ_FIRST((head)); \
+} while (0)
+
+#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\
+ TAILQ_NEXT((elm), field)->field.tqe_prev = \
+ &TAILQ_NEXT((elm), field); \
+ else \
+ (head)->tqh_last = &TAILQ_NEXT((elm), field); \
+ TAILQ_NEXT((listelm), field) = (elm); \
+ (elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \
+} while (0)
+
+#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
+ TAILQ_NEXT((elm), field) = (listelm); \
+ *(listelm)->field.tqe_prev = (elm); \
+ (listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \
+} while (0)
+
+#define TAILQ_INSERT_HEAD(head, elm, field) do { \
+ if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \
+ TAILQ_FIRST((head))->field.tqe_prev = \
+ &TAILQ_NEXT((elm), field); \
+ else \
+ (head)->tqh_last = &TAILQ_NEXT((elm), field); \
+ TAILQ_FIRST((head)) = (elm); \
+ (elm)->field.tqe_prev = &TAILQ_FIRST((head)); \
+} while (0)
+
+#define TAILQ_INSERT_TAIL(head, elm, field) do { \
+ TAILQ_NEXT((elm), field) = NULL; \
+ (elm)->field.tqe_prev = (head)->tqh_last; \
+ *(head)->tqh_last = (elm); \
+ (head)->tqh_last = &TAILQ_NEXT((elm), field); \
+} while (0)
+
+#define TAILQ_LAST(head, headname) \
+ (*(((struct headname *)((head)->tqh_last))->tqh_last))
+
+#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+
+#define TAILQ_PREV(elm, headname, field) \
+ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+
+#define TAILQ_REMOVE(head, elm, field) do { \
+ if ((TAILQ_NEXT((elm), field)) != NULL) \
+ TAILQ_NEXT((elm), field)->field.tqe_prev = \
+ (elm)->field.tqe_prev; \
+ else \
+ (head)->tqh_last = (elm)->field.tqe_prev; \
+ *(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \
+} while (0)
+
+/*
+ * Circular queue declarations.
+ */
+#define CIRCLEQ_HEAD(name, type) \
+struct name { \
+ struct type *cqh_first; /* first element */ \
+ struct type *cqh_last; /* last element */ \
+}
+
+#define CIRCLEQ_HEAD_INITIALIZER(head) \
+ { (void *)&(head), (void *)&(head) }
+
+#define CIRCLEQ_ENTRY(type) \
+struct { \
+ struct type *cqe_next; /* next element */ \
+ struct type *cqe_prev; /* previous element */ \
+}
+
+/*
+ * Circular queue functions.
+ */
+#define CIRCLEQ_EMPTY(head) ((head)->cqh_first == (void *)(head))
+
+#define CIRCLEQ_FIRST(head) ((head)->cqh_first)
+
+#define CIRCLEQ_FOREACH(var, head, field) \
+ for ((var) = CIRCLEQ_FIRST((head)); \
+ (var) != (void *)(head); \
+ (var) = CIRCLEQ_NEXT((var), field))
+
+#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \
+ for ((var) = CIRCLEQ_LAST((head)); \
+ (var) != (void *)(head); \
+ (var) = CIRCLEQ_PREV((var), field))
+
+#define CIRCLEQ_INIT(head) do { \
+ CIRCLEQ_FIRST((head)) = (void *)(head); \
+ CIRCLEQ_LAST((head)) = (void *)(head); \
+} while (0)
+
+#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ CIRCLEQ_NEXT((elm), field) = CIRCLEQ_NEXT((listelm), field); \
+ CIRCLEQ_PREV((elm), field) = (listelm); \
+ if (CIRCLEQ_NEXT((listelm), field) == (void *)(head)) \
+ CIRCLEQ_LAST((head)) = (elm); \
+ else \
+ CIRCLEQ_PREV(CIRCLEQ_NEXT((listelm), field), field) = (elm);\
+ CIRCLEQ_NEXT((listelm), field) = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
+ CIRCLEQ_NEXT((elm), field) = (listelm); \
+ CIRCLEQ_PREV((elm), field) = CIRCLEQ_PREV((listelm), field); \
+ if (CIRCLEQ_PREV((listelm), field) == (void *)(head)) \
+ CIRCLEQ_FIRST((head)) = (elm); \
+ else \
+ CIRCLEQ_NEXT(CIRCLEQ_PREV((listelm), field), field) = (elm);\
+ CIRCLEQ_PREV((listelm), field) = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
+ CIRCLEQ_NEXT((elm), field) = CIRCLEQ_FIRST((head)); \
+ CIRCLEQ_PREV((elm), field) = (void *)(head); \
+ if (CIRCLEQ_LAST((head)) == (void *)(head)) \
+ CIRCLEQ_LAST((head)) = (elm); \
+ else \
+ CIRCLEQ_PREV(CIRCLEQ_FIRST((head)), field) = (elm); \
+ CIRCLEQ_FIRST((head)) = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
+ CIRCLEQ_NEXT((elm), field) = (void *)(head); \
+ CIRCLEQ_PREV((elm), field) = CIRCLEQ_LAST((head)); \
+ if (CIRCLEQ_FIRST((head)) == (void *)(head)) \
+ CIRCLEQ_FIRST((head)) = (elm); \
+ else \
+ CIRCLEQ_NEXT(CIRCLEQ_LAST((head)), field) = (elm); \
+ CIRCLEQ_LAST((head)) = (elm); \
+} while (0)
+
+#define CIRCLEQ_LAST(head) ((head)->cqh_last)
+
+#define CIRCLEQ_NEXT(elm,field) ((elm)->field.cqe_next)
+
+#define CIRCLEQ_PREV(elm,field) ((elm)->field.cqe_prev)
+
+#define CIRCLEQ_REMOVE(head, elm, field) do { \
+ if (CIRCLEQ_NEXT((elm), field) == (void *)(head)) \
+ CIRCLEQ_LAST((head)) = CIRCLEQ_PREV((elm), field); \
+ else \
+ CIRCLEQ_PREV(CIRCLEQ_NEXT((elm), field), field) = \
+ CIRCLEQ_PREV((elm), field); \
+ if (CIRCLEQ_PREV((elm), field) == (void *)(head)) \
+ CIRCLEQ_FIRST((head)) = CIRCLEQ_NEXT((elm), field); \
+ else \
+ CIRCLEQ_NEXT(CIRCLEQ_PREV((elm), field), field) = \
+ CIRCLEQ_NEXT((elm), field); \
+} while (0)
+
+#ifdef _KERNEL
+
+/*
+ * XXX insque() and remque() are an old way of handling certain queues.
+ * They bogusly assumes that all queue heads look alike.
+ */
+
+struct quehead {
+ struct quehead *qh_link;
+ struct quehead *qh_rlink;
+};
+
+#ifdef __GNUC__
+
+static __inline void
+insque(void *a, void *b)
+{
+ struct quehead *element = (struct quehead *)a,
+ *head = (struct quehead *)b;
+
+ element->qh_link = head->qh_link;
+ element->qh_rlink = head;
+ head->qh_link = element;
+ element->qh_link->qh_rlink = element;
+}
+
+static __inline void
+remque(void *a)
+{
+ struct quehead *element = (struct quehead *)a;
+
+ element->qh_link->qh_rlink = element->qh_rlink;
+ element->qh_rlink->qh_link = element->qh_link;
+ element->qh_rlink = 0;
+}
+
+#else /* !__GNUC__ */
+
+void insque __P((void *a, void *b));
+void remque __P((void *a));
+
+#endif /* __GNUC__ */
+
+#endif /* _KERNEL */
+
+#endif /* !_SYS_QUEUE_H_ */
View
49 src/sock.c
@@ -0,0 +1,49 @@
+#include <stdio.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/select.h>
+#include <sys/fcntl.h>
+#include <netinet/in.h>
+#include "vt.h"
+#include "event.h"
+#include "sock.h"
+
+#define PORT 4000
+#define LISTEN_SIZ 10
+
+int vt_sockfd_new(){
+ int r, opt, oflag;
+ int sockfd;
+ struct sockaddr_in servaddr;
+
+ // initialize the sock fd
+ sockfd = socket(AF_INET, SOCK_STREAM, 0);
+ if (sockfd < 0) {
+ vt_log("bad socket.");
+ return -1;
+ }
+ opt = 1;
+ setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt));
+ // set up non-blocking
+ oflag = fcntl(sockfd, F_GETFL, 0);
+ fcntl(sockfd, F_SETFL, oflag | O_NONBLOCK);
+ // set up sockaddr
+ servaddr.sin_family = AF_INET;
+ servaddr.sin_addr.s_addr = htonl(INADDR_ANY);
+ servaddr.sin_port = htons(PORT);
+ //
+ r = bind(sockfd, (struct sockaddr*)&servaddr, sizeof(servaddr));
+ if (r < 0) {
+ vt_log("bad bind.");
+ return -1;
+ }
+ //
+ r = listen(sockfd, LISTEN_SIZ);
+ if (r < 0) {
+ vt_log("bad listen.");
+ return -1;
+ }
+ vt_log("listening on port: %d\n", PORT);
+ return sockfd;
+}
+
View
6 src/sock.h
@@ -0,0 +1,6 @@
#ifndef SOCK_H
#define SOCK_H

/* Create the non-blocking listening socket; returns the fd or -1.
 * Fix: (void) makes this a real prototype — in C a bare () declares
 * an unspecified argument list, defeating argument checking. */
int vt_sockfd_new(void);

#endif
View
136 src/vt.cpp
@@ -1,136 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <fcntl.h>
-#include <errno.h>
-#include <string.h>
-#include <assert.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/select.h>
-#include <netinet/in.h>
-//
-#include "vt/vt.h"
-
-#define LISTEN_SIZ 10
-#define PORT 8888
-
-vconn_t *connmap[FD_SETSIZE] = {NULL, };
-
-
-int main(int argc, char **argv){
- int r, tmp, oflag;
- int port;
- int sockfd;
- struct sockaddr_in servaddr;
-
- sockfd = socket(AF_INET, SOCK_STREAM, 0);
- if (sockfd < 0)
- vpanic("bad socket.");
- tmp = 1;
- setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &tmp, sizeof(tmp));
- // set up non-blocking
- oflag = fcntl(sockfd, F_GETFL, 0);
- fcntl(sockfd, F_SETFL, oflag | O_NONBLOCK);
-
- port = PORT;
-
- servaddr.sin_family = AF_INET;
- servaddr.sin_addr.s_addr = htonl(INADDR_ANY);
- servaddr.sin_port = htons(port);
-
- r = bind(sockfd, (struct sockaddr*) &servaddr, sizeof(servaddr));
- if (r < 0)
- vpanic("bad bind.");
-
- r = listen(sockfd, LISTEN_SIZ);
- if (r < 0)
- vpanic("bad listen.");
-
- vloop(sockfd);
-}
-
-int vloop(int sockfd){
- struct sockaddr_in cliaddr;
- size_t salen;
- int connfd, maxfd, fd;
- int oflag, r;
- fd_set rdset, wrset;
- vconn_t *cp;
- vbuf_head_t *bhp;
- vbuf_t *bp;
- //
-
- FD_ZERO(&rdset);
- FD_ZERO(&wrset);
-
- FD_SET(sockfd, &rdset);
- maxfd = sockfd;
-
- for(;;){
- r = select(maxfd+1, &rdset, &wrset, NULL, NULL);
- //printf("select() returns.\n");
- // on error
- if (r < 0) {
- if (errno == EINTR)
- continue;
- }
- // on accept ok
- if (FD_ISSET(sockfd, &rdset)) {
- fd = accept(sockfd, NULL, NULL);
- if (fd < 0) {
- if (errno==EWOULDBLOCK || errno==EINTR)
- continue;
- else
- vpanic("bad accept().");
- }
- // set it non-block
- oflag = fcntl(fd, F_GETFL, 0);
- fcntl(fd, F_SETFL, oflag | O_NONBLOCK);
- //
- FD_SET(fd, &rdset);
- cp = vconn_new(fd);
- connmap[fd] = cp;
- maxfd = max(fd, maxfd);
- continue;
- }
- // on reading & writing
- for (fd=0; fd<maxfd+1; fd++) {
- if (fd==sockfd) continue;
- // on read ready
- if (FD_ISSET(fd, &rdset)) {
- cp = connmap[fd];
- if (cp == NULL) {
- vpanic("bad conn entry");
- }
- bhp = cp->c_buf_read;
- r = vbuf_read(fd, bhp);
- if (r < 0) {
- if (errno==EWOULDBLOCK || errno==EINTR)
- continue;
- else
- vpanic("bad read.");
- }
- // recieve finished
- if (r == 0) {
- FD_CLR(fd, &rdset);
- FD_SET(fd, &wrset);
- on_read_complete(cp);
- }
- continue;
- }
- // on write ready
- if (FD_ISSET(fd, &wrset)) {
- cp = connmap[fd];
- if (cp == NULL) {
- vpanic("bad conn entry");
- }
- bhp = cp->c_buf_write;
- }
- }
- }
-}
-
-int on_read_complete(vconn_t *cp){
- printf("read complete\n");
-}
View
21 src/vt.h
@@ -0,0 +1,21 @@
+#ifndef VT_H
+#define VT_H
+
+#include <stdio.h>
+#include <stddef.h>
+#include <string.h>
+#include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/select.h>
+#include <netinet/in.h>
+
+#define NDEBUG 1
+
+/* TODO: for debug yet, the fprintf(stderr, ...) stuff is unavailable for daemons. */
+#define vt_log(fmt, ...) do { \
+ fprintf(stderr, "%s: " fmt, __func__, ##__VA_ARGS__); } while(0)
+
+#endif
View
45 src/vt/buffer.h
@@ -1,45 +0,0 @@
-#ifndef buf_H
-#define buf_H
-
-#include <list>
-#include "conn.h"
-
-typedef struct vbuf {
- char *b_data;
- int b_size;
-} vbuf_t;
-
-typedef struct vbuf_head {
- int bh_type;
- int bh_stat;
- vconn_t *bh_conn;
- std::list<vbuf_t*> *bh_bufs;
-} vbuf_head_t;
-
-/* type */
-#define BUF_READ 1
-#define BUF_WRITE 2
-
-/* stat */
-#define BUF_ERROR 0x1
-#define BUF_COMPLETE 0x2
-
-/* max size */
-#define BUF_SIZE 4
-
-// buf head
-vbuf_head_t* vbuf_head_new(int type);
-int vbuf_head_free(vbuf_head_t *);
-//
-vbuf_t* vbuf_append(vbuf_head_t *);
-int vbuf_erase_first(vbuf_head_t *bhp);
-int vbuf_erase_last(vbuf_head_t *);
-/**/
-int vbuf_free(vbuf_t* bp);
-vbuf_t* vbuf_alloc();
-/**/
-int vbuf_read(int fd, vbuf_head_t *bhp);
-int vbuf_write(int fd, vbuf_head_t *bhp);
-int on_buf_complete(struct vbuf_head *);
-
-#endif
View
28 src/vt/conn.h
@@ -1,28 +0,0 @@
-#ifndef CONN_H
-#define CONN_H
-
-#include "buffer.h"
-
-typedef struct vconn {
- int c_stat;
- int c_fd;
- struct vbuf_head *c_buf_read;
- struct vbuf_head *c_buf_write;
-} vconn_t;
-
-#define CONN_READY 0
-#define CONN_READING 1
-#define CONN_WRITING 2
-
-
-vconn_t* vconn_new(int fd);
-int vconn_close(struct vconn*);
-//
-int vread_begin(struct vconn*);
-int vwrite_begin(struct vconn*);
-//
-int on_read_complete(struct vconn*);
-int on_write_complete(struct vconn*);
-int on_conn_error(struct vconn*);
-
-#endif
View
14 src/vt/request.h
@@ -1,14 +0,0 @@
-#ifndef REQUEST_H
-#define REQUEST_H
-
-struct vrequest {
- int req_stat;
- int req_fd;
- vconn_t *req_conn;
-};
-
-struct vrequest* vrequest_parse(struct vconn*);
-int vrequest_free(struct vrequest*);
-int on_request(struct vrequest*);
-
-#endif
View
15 src/vt/response.h
@@ -1,15 +0,0 @@
-#ifndef RESPONSE_H
-#define RESPONSE_H
-
-struct vresponse {
- int rsp_stat;
- int rsp_env;
-};
-
-struct vresponse* vresponse_new();
-int vresponse_free(struct vresponse*);
-int vresponse_send(struct vresponse*);
-
-
-
-#endif
View
26 src/vt/vt.h
@@ -1,26 +0,0 @@
-#ifndef VT_H
-#define VT_H
-
-#include <map>
-#include <cstring>
-
-struct lstr {
- bool operator()(const char *str1, const char *str2){
- return strcmp(str1, str2) < 0;
- }
-};
-
-typedef std::map<const char*, char*, lstr> venv_t;
-
-#define vpanic(str) do{ printf("%s:%d %s\n", __FILE__, __LINE__, (str)); exit(1); } while(0)
-
-/* prototypes */
-int vloop(int listenfd);
-
-/* includes */
-#include "buffer.h"
-#include "conn.h"
-
-using namespace std;
-
-#endif
View
6 tool/compile.rb
@@ -2,11 +2,11 @@ def omap(path)
"bin/#{File.basename(path).ext('o')}"
end
-CC = 'g++'
-CFLAG = '-Wall'
+CC = 'gcc'
+CFLAG = '-Wall -Werror'
CINC = ''
-cfiles = FileList['src/*.cpp']
+cfiles = FileList['src/*.c']
hfiles = FileList['src/vt/*.h']
ofiles = cfiles.map{|path| omap(path) }

No commit comments for this range

Something went wrong with that request. Please try again.