doc and other fixes

git-svn-id: http://sands.sce.ntu.edu.sg/svn/lluis/trunk@3 6fa9df1f-5871-4ed8-8e73-65a2acd6172f
commit 43c055bc435af00ebcc7e36dec6b2d232ceda2b3 0 parents
lluis authored
0  README
No changes.
0  clusterdfs/__init__.py
No changes.
87 clusterdfs/bufferedio.py
@@ -0,0 +1,87 @@
+import io
+import gevent
+import gevent.queue
+import gevent.socket
+import numpy
+
+class IOBuffer(object):
+ def __init__(self):
+ self.buff = bytearray(io.DEFAULT_BUFFER_SIZE)
+ #self.buff = numpy.ndarray(shape=(io.DEFAULT_BUFFER_SIZE), dtype=numpy.byte)
+ self.mem = memoryview(self.buff)
+ self.length = 0
+
+ def data(self):
+ return self.mem[0:self.length]
+
+class BufferedIO(object):
+ def __init__(self, callback, num_buffers=2):
+ self.write_queue = gevent.queue.Queue()
+ self.read_queue = gevent.queue.Queue()
+
+ for i in xrange(num_buffers):
+ self.write_queue.put(IOBuffer())
+
+ self.reader = gevent.spawn(self.process)
+ self.callback = callback
+ self.processed = 0
+
+ def process(self):
+ while True:
+ iobuffer = self.read_queue.get()
+ if iobuffer is None:
+ # sentinel from run(): all buffers have been queued
+ break
+ self.callback(iobuffer.data())
+ self.write_queue.put(iobuffer)
+
+ def fill_buffer(self, iobuffer):
+ raise NotImplementedError('fill_buffer must be implemented by a subclass')
+
+ def finished(self):
+ raise NotImplementedError('finished must be implemented by a subclass')
+
+ def run(self):
+ while not self.finished():
+ iobuffer = self.write_queue.get()
+ self.processed += self.fill_buffer(iobuffer)
+ self.read_queue.put(iobuffer)
+
+ # Send a sentinel and wait until the reader greenlet drains the queue
+ self.read_queue.put(None)
+ self.reader.join()
+
+class FileBufferedIO(BufferedIO):
+ def __init__(self, f, *args, **kwargs):
+ BufferedIO.__init__(self, *args, **kwargs)
+ self.fio = io.open(f, 'rb')
+ self.tofinish = False
+
+ def finished(self):
+ return self.tofinish
+
+ def fill_buffer(self, iobuffer):
+ assert not self.tofinish
+ num = self.fio.readinto(iobuffer.buff)
+ iobuffer.length = num
+ if num==0: self.tofinish = True
+ return num
+
+class SocketBufferedIO(BufferedIO):
+ def __init__(self, socket, size, *args, **kwargs):
+ BufferedIO.__init__(self, *args, **kwargs)
+ self.socket = socket
+ self.received = 0
+ self.size = size
+
+ def finished(self):
+ return self.received==self.size
+
+ def fill_buffer(self, iobuffer):
+ assert self.received<self.size
+
+ num = self.socket.recv_into(iobuffer.buff, min(len(iobuffer.buff), self.size-self.received))
+ if num>0:
+ iobuffer.length = num
+ self.received += num
+ return num
+ else:
+ raise IOError("Socket disconnected.")
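The BufferedIO classes above implement double buffering: run() fills buffers from a fixed pool and hands them to a reader greenlet, which invokes the callback and recycles each buffer. A minimal usage sketch for FileBufferedIO (assuming a local file data.bin; the callback receives a memoryview into a pooled buffer that will be reused, so copy the data if you keep it):

    from clusterdfs.bufferedio import FileBufferedIO

    total = [0]
    def consume(data):
        # data is a memoryview into a recycled buffer
        total[0] += len(data)

    FileBufferedIO('data.bin', consume).run()
    print total[0], 'bytes read'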
121 clusterdfs/datablock.py
@@ -0,0 +1,121 @@
+import os.path
+import uuid
+import struct
+import hashlib
+import cStringIO
+
+class DataBlockHeader(object):
+ '''Each stored block has a 256 byte header. This is its binary structure (all little-endian):
+ + 32 bytes ( 0 -- 31) --> file uuid (hex formatted string)
+ + 32 bytes ( 32 -- 63) --> block uuid (hex formatted string)
+ + 8 bytes ( 64 -- 71) --> offset in the file (unsigned int)
+ + 8 bytes ( 72 -- 79) --> block size (unsigned int)
+ + 64 bytes ( 80 -- 143) --> block's SHA-256 checksum (hex formatted string)
+ +112 bytes (144 -- 255) --> reserved for future uses
+ '''
+
+ SIZE = 256
+ FIELDS = ['file_uuid','block_uuid','offset','size','sha256']
+ DEFAULTS = [None, None, 0, 0, None]
+
+ def __init__(self, **kwargs):
+ for field, default in zip(self.FIELDS, self.DEFAULTS):
+ setattr(self, field, kwargs[field] if field in kwargs else default)
+
+ @classmethod
+ def parse(cls, f):
+ inst = DataBlockHeader()
+
+ inst.file_uuid = f.read(32)
+ inst.block_uuid = f.read(32)
+ inst.offset, = struct.unpack('<Q', f.read(8))
+ inst.size, = struct.unpack('<Q', f.read(8))
+ inst.sha256 = f.read(64)
+
+ f.seek(cls.SIZE)
+
+ return inst
+
+ def dump(self):
+ raw = cStringIO.StringIO()
+ raw.write(self.file_uuid)
+ raw.write(self.block_uuid)
+ raw.write(struct.pack('<Q', self.offset))
+ raw.write(struct.pack('<Q', self.size))
+ raw.write(self.sha256)
+ raw.write(' '*112)
+ data = raw.getvalue()
+ assert len(data)==self.SIZE
+ return data
+
+class DataBlock(file):
+ def init(self, header):
+ self.header = header
+ if self.mode == 'w':
+ self.sha256 = hashlib.sha256()
+
+ def write(self, data):
+ if self.mode == 'r':
+ raise IOError("Block is in read-only mode.")
+ if self.closed:
+ raise IOError("Block is closed.")
+ length = len(data)
+ self.sha256.update(data)
+ self.header.size += length
+ self.header.offset += length
+ file.write(self, data)
+
+ def close(self):
+ if self.mode=='w':
+ # hexdigest() returns 64 hex characters for SHA-256
+ self.header.sha256 = self.sha256.hexdigest()
+ # Sync header
+ self.seek(0)
+ file.write(self, self.header.dump())
+
+ file.close(self)
+
+ def __del__(self):
+ if not self.closed:
+ self.close()
+
+ def __unsupported__(self, *args, **kwargs):
+ raise NotImplementedError("Operation not supported on DataBlock.")
+
+ readline = __unsupported__
+ writelines = __unsupported__
+
+ @staticmethod
+ def filename(file_uuid, block_uuid):
+ return '%s_%s'%(file_uuid, block_uuid)
+
+ @staticmethod
+ def create(base_dir='', file_uuid=None, block_uuid=None):
+ if file_uuid is None: file_uuid = '%032x'%uuid.uuid4().int
+ if block_uuid is None: block_uuid = '%032x'%uuid.uuid4().int
+
+ header = DataBlockHeader(file_uuid=file_uuid, block_uuid=block_uuid)
+ f = DataBlock(os.path.join(base_dir, DataBlock.filename(file_uuid, block_uuid)), 'w')
+ f.init(header)
+ f.seek(DataBlockHeader.SIZE)
+ return f
+
+ @staticmethod
+ def open(file_uuid, block_uuid, base_dir=''):
+ f = DataBlock(os.path.join(base_dir, DataBlock.filename(file_uuid, block_uuid)), 'r')
+ f.init(DataBlockHeader.parse(f))
+ return f
+
+if __name__=='__main__':
+ d = DataBlock.create(base_dir='dd1')
+ fid = d.header.file_uuid
+ bid = d.header.block_uuid
+ d.write('aaaaaaa\n asdasdasd')
+ d.close()
+
+ d = DataBlock.open(fid, bid, base_dir='dd1')
+ print d.read()
+ d.close()
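A quick round-trip sanity check of the 256-byte header layout documented above (a sketch; field widths are 32+32+8+8+64+112 = 256):

    import cStringIO
    from clusterdfs.datablock import DataBlockHeader

    h = DataBlockHeader(file_uuid='f'*32, block_uuid='b'*32,
                        offset=0, size=1024, sha256='0'*64)
    raw = h.dump()
    assert len(raw) == DataBlockHeader.SIZE
    h2 = DataBlockHeader.parse(cStringIO.StringIO(raw))
    assert h2.size == 1024 and h2.block_uuid == 'b'*32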
202 clusterdfs/datanode.py
@@ -0,0 +1,202 @@
+#!/usr/bin/env python
+
+import io
+import os
+import os.path
+import socket
+import logging
+import argparse
+
+import gevent
+import gevent.socket
+
+from namenode import NameNodeHeader
+from networking import *
+from bufferedio import FileBufferedIO, SocketBufferedIO
+
+class DataNodeConfig(object):
+ port = 7777
+ bind_addr = '0.0.0.0'
+ datadir = 'datadir/'
+ namenode_addr = 'localhost'
+ namenode_port = 7770
+ ping_timeout = 10
+
+ def __init__(self, args):
+ for k, v in args.__dict__.iteritems():
+ if v is not None: self.__dict__[k] = v
+
+ if not self.datadir.endswith('/'):
+ self.datadir = self.datadir+'/'
+
+class DataNodeHeader(object):
+ OP_STORE = 0
+ OP_RETRIEVE = 1
+ OP_REMOVE = 2
+
+class DataNodeQuery(ServerHandle):
+ def process_query(self):
+ if self.header['op']==DataNodeHeader.OP_STORE:
+ return self.store_block()
+ elif self.header['op']==DataNodeHeader.OP_RETRIEVE:
+ return self.retrieve_block()
+ else:
+ return ServerResponse.error(msg='Unknown operation.')
+
+ def forward_block(self, dst_fd, dst_node):
+ def inner(data):
+ if dst_node: dst_node.socket.sendall(data)
+ dst_fd.write(data)
+ return inner
+
+ def store_block(self):
+ # Read block properties
+ block_id = self.header['id']
+ block_size = self.header['length']
+ logging.info("Receiving block '%s' (%d bytes) from %s.", block_id, block_size, self.address)
+
+ # Get the forward list and the next forward node
+ next_node = None
+ next_forward_list = []
+ if 'fwdlist' in self.header:
+ forward_list = self.header['fwdlist']
+ if forward_list:
+ logging.info("Forwarding '%s' to %s.", block_id, repr(forward_list[0]))
+ logging.info("Remaining forwards: %d.", len(forward_list)-1)
+ next_node = Client(*forward_list[0])
+ next_forward_list = forward_list[1:]
+
+ # Send header to next node
+ if next_node:
+ header = self.header.copy()
+ header['fwdlist'] = next_forward_list
+ next_node.send(header)
+
+ # Destination file.
+ dst_fd = io.open(os.path.join(self.server.config.datadir, block_id), 'wb')
+
+ try:
+ # Process incoming data
+ SocketBufferedIO(self.socket, block_size, self.forward_block(dst_fd, next_node)).run()
+
+ # Receive response from next_node
+ if next_node:
+ response = next_node.recv()
+ if response['code']==ServerResponse.RESPONSE_OK:
+ logging.info("Block '%s' (%d bytes) stored & forwarded successfully."%(block_id, block_size))
+ return ServerResponse.ok(msg='Block stored & forwarded successfully.')
+ else:
+ return response
+ else:
+ logging.info("Block '%s' (%d bytes) stored successfully."%(block_id, block_size))
+ return ServerResponse.ok(msg='Block stored successfully.')
+
+ except IOError:
+ logging.info("Transmission from %s failed.", self.address)
+ return ServerResponse.error(msg='Transmission failed.')
+
+ finally:
+ # Clean-up resources
+ if next_node: next_node.kill()
+ dst_fd.close()
+
+ def retrieve_block(self):
+ # Read block properties
+ block_id = self.header['id']
+ path = os.path.join(self.server.config.datadir, block_id)
+ block_size = os.path.getsize(path)
+ logging.info("Sending block '%s' (%d bytes) to %s."%(block_id, block_size, self.address))
+
+ # Send block size
+ self.send(block_size)
+
+ # Process block
+ for data in FileIterable(path):
+ self.socket.sendall(data)
+
+ return ServerResponse.ok(msg='Block retrieved successfully.')
+
+class DataNodeNotifier(object):
+ def __init__(self, config, server):
+ self.config = config
+ self.server = server
+ self.process = gevent.spawn(self.timeout)
+ self.ping = {'op':NameNodeHeader.OP_PING, 'datanode_port':self.config.port}
+
+ def stop(self):
+ self.process.kill()
+
+ def timeout(self):
+ while True:
+ # send ping
+ try:
+ logging.debug('Sending ping.')
+ ne = NetworkEndpoint(gevent.socket.create_connection((self.config.namenode_addr, self.config.namenode_port)))
+ ne.send(self.ping)
+ ne.send([])
+ response = ne.recv()
+ if response['code']!=ServerResponse.RESPONSE_OK:
+ logging.error('Error delivering ping to nameserver: %s', response['msg'])
+
+ except socket.error, (value,message):
+ logging.error("Error delivering ping to nameserver: %s."%(message))
+
+ # sleep timeout
+ gevent.sleep(self.config.ping_timeout)
+
+class DataNode(Server):
+ def __init__(self, config):
+ self.config = config
+ logging.info("Configuring DataNode to listen on localhost:%d"%(self.config.port))
+ Server.__init__(self, DataNodeQuery, port=self.config.port)
+ self.notifier = DataNodeNotifier(self.config, self)
+ self.lock_file = os.path.join(self.config.datadir, '.lock')
+
+ def init(self):
+ self.lock_datadir()
+ self.serve()
+
+ def finalize(self):
+ self.notifier.stop()
+ self.unlock_datadir()
+
+ def lock_datadir(self):
+ logging.info("Locking %s", self.lock_file)
+
+ if not os.path.exists(self.config.datadir):
+ raise Exception("DataNode cannot lock data dir (invalid datadir).")
+
+ elif os.path.exists(self.lock_file):
+ raise Exception("DataNode cannot lock data dir (locked dir).")
+
+ else:
+ open(self.lock_file, 'w').close()
+
+ def unlock_datadir(self):
+ assert os.path.exists(self.lock_file)
+ logging.info("Unlocking %s", self.lock_file)
+ os.remove(self.lock_file)
+
+if __name__ == '__main__':
+ logging.getLogger().setLevel(logging.DEBUG)
+
+ from processname import setprocname
+ if not setprocname():
+ logging.error('Cannot change the process name for %s.', __file__)
+
+ parser = argparse.ArgumentParser(description='DataNode')
+ parser.add_argument('-d', action="store", default=None, dest="datadir", type=str, help="Directory to store raw data.")
+ parser.add_argument('-l', action="store", default="0.0.0.0", dest="bind_addr", type=str, help="DataNode binding address.")
+ parser.add_argument('-p', action="store", default=None, dest="port", type=int, help="Port where DataNode listens.")
+ parser.add_argument('-na', action="store", default='localhost', dest="namenode_addr", type=str, help="Address of the NameNode.")
+ parser.add_argument('-np', action="store", default=7770, dest="namenode_port", type=int, help="Port of the NameNode.")
+ config = DataNodeConfig(parser.parse_args())
+
+ try:
+ dn = DataNode(config)
+ dn.init()
+ dn.finalize()
+ except KeyboardInterrupt:
+ logging.info("Finalizing DataNode...")
+ dn.finalize()
+ except Exception:
+ logging.error("Fatal Error!!")
+ raise
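store_block() implements chain replication: each node pops the first entry of fwdlist, streams the incoming block both to local disk and to that next node, and reports success only once the rest of the chain acknowledges. A client storing a block with two replicas might build the header like this (addresses illustrative):

    from clusterdfs.datanode import DataNodeHeader

    header = {'op': DataNodeHeader.OP_STORE,
              'id': 'myblock',
              'length': 4096,
              # first node forwards to 10.0.0.2, which forwards to 10.0.0.3
              'fwdlist': [('10.0.0.2', 7777), ('10.0.0.3', 7777)]}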
45 clusterdfs/dfs.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+import sys
+import os.path
+from datanode import DataNodeHeader
+from bufferedio import FileBufferedIO
+from networking import *
+
+class DFS(Client):
+ def store(self, path):
+ length = os.path.getsize(path)
+ header = {'op':DataNodeHeader.OP_STORE, 'length':length, 'id':path.split('/')[-1]}
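+ # NOTE: the forward list is hardcoded here for testing; a real client
+ # would obtain datanode addresses from the NameNode (OP_GETNODES).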
+ header['fwdlist'] = [('172.21.48.151',7777)]
+ print header
+ self.send(header)
+ FileBufferedIO(path, lambda d: self.socket.sendall(d)).run()
+ result = self.recv()
+ print result
+
+ def retrieve(self, name, path):
+ header = {'op':DataNodeHeader.OP_RETRIEVE, 'id':name}
+ print header
+ self.send(header)
+ size = self.recv()
+ f = open(path, 'wb')
+ for data in SocketIterable(self.socket, size):
+ f.write(data)
+ f.close()
+ result = self.recv()
+ print result
+
+if __name__=='__main__':
+ server = sys.argv[1]
+ port = int(sys.argv[2])
+ dfs = DFS(server, port)
+
+ operation = sys.argv[3]
+ if operation=='STORE':
+ path = sys.argv[4]
+ dfs.store(path)
+ elif operation=='RETRIEVE':
+ name = sys.argv[4]
+ path = sys.argv[5]
+ dfs.retrieve(name, path)
+ else:
+ print 'Unknown operation:', operation
+ sys.exit(1)
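Assuming a datanode listening on localhost:7777, the client is invoked positionally (a hypothetical session; the stored block id is the file's basename):

    python dfs.py localhost 7777 STORE ./myfile
    python dfs.py localhost 7777 RETRIEVE myfile ./myfile.copy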
90 clusterdfs/namenode.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python
+
+import time
+import random
+import logging
+import argparse
+import socket
+import collections
+from networking import *
+
+class NameNodeConfig(object):
+ port = 7770
+
+ def __init__(self, args):
+ for k, v in args.__dict__.iteritems():
+ if v is not None: self.__dict__[k] = v
+
+class NameNodeHeader(object):
+ OP_PING = 0
+ OP_GETNODES = 1
+
+class NameNodeQuery(ServerHandle):
+ def get_address(self):
+ if self.address[0].startswith('127.'):
+ return self.local_address()
+ else:
+ return self.address[0]
+
+ def process_query(self):
+ if self.header['op']==NameNodeHeader.OP_PING:
+ return self.ping()
+ elif self.header['op']==NameNodeHeader.OP_GETNODES:
+ return self.getnodes()
+ else:
+ return ServerResponse.error(msg='Unknown operation.')
+
+ def ping(self):
+ datanode_addr = self.get_address(), self.header['datanode_port']
+ logging.debug('Receiving ping from %s', datanode_addr)
+
+ if datanode_addr not in self.server.db_pings:
+ self.server.db_nodes.append(datanode_addr)
+ self.server.db_pings[datanode_addr] = time.time()
+
+ stored_blocks = self.recv()
+ for file_uuid, block_uuid in stored_blocks:
+ self.server.db_direct_lookup[file_uuid][block_uuid] = datanode_addr
+ self.server.db_reverse_lookup[datanode_addr].add((file_uuid, block_uuid))
+
+ return ServerResponse.ok(msg='Blocks processed.')
+
+ def getnodes(self):
+ num_nodes = self.header['numnodes']
+ sample = random.sample(self.server.db_nodes, num_nodes)
+ self.send(sample)
+ return ServerResponse.ok(msg='List found.')
+
+class NameNode(Server):
+ def __init__(self, config):
+ self.config = config
+ logging.info("Configuring NameNode to listen on localhost:%d"%(self.config.port))
+ Server.__init__(self, NameNodeQuery, port=self.config.port)
+
+ self.db_nodes = []
+ self.db_direct_lookup = collections.defaultdict(dict)
+ self.db_reverse_lookup = collections.defaultdict(set)
+ self.db_pings = {}
+
+ def init(self):
+ self.serve()
+
+ def finalize(self):
+ logging.info("Finalizing NameNode...")
+
+if __name__ == '__main__':
+ logging.getLogger().setLevel(logging.DEBUG)
+
+ from processname import setprocname
+ if not setprocname():
+ logging.error('Cannot change the process name for %s.', __file__)
+
+ parser = argparse.ArgumentParser(description='NameNode: tracker for datanodes.')
+ parser.add_argument('-p', action="store", default=None, dest="port", type=int, help="Port where NameNode listens.")
+ config = NameNodeConfig(parser.parse_args())
+
+ nn = NameNode(config)
+ try:
+ nn.init()
+ except KeyboardInterrupt:
+ nn.finalize()
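Nothing else in this commit exercises OP_GETNODES; a client requesting two datanodes might look like this sketch (built on the framing in networking.py):

    import gevent.socket
    from clusterdfs.networking import NetworkEndpoint
    from clusterdfs.namenode import NameNodeHeader

    ne = NetworkEndpoint(gevent.socket.create_connection(('localhost', 7770)))
    ne.send({'op': NameNodeHeader.OP_GETNODES, 'numnodes': 2})
    nodes = ne.recv()     # list of (addr, port) tuples sent by getnodes()
    result = ne.recv()    # ServerResponse dict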
91 clusterdfs/networking.py
@@ -0,0 +1,91 @@
+import struct
+import cPickle
+import logging
+import commands
+import socket
+import gevent.server
+import gevent.socket
+
+class ServerResponse(object):
+ RESPONSE_OK = 0
+ RESPONSE_ERROR = 1
+
+ @classmethod
+ def ok(cls, msg='', data=None):
+ # data=None avoids a shared mutable default argument
+ return {'code':cls.RESPONSE_OK, 'msg':msg, 'data':data or {}}
+
+ @classmethod
+ def error(cls, msg='', data=None):
+ return {'code':cls.RESPONSE_ERROR, 'msg':msg, 'data':data or {}}
+
+class NetworkException(Exception):
+ pass
+
+class NetworkEndpoint(object):
+ def __init__(self, socket):
+ self.socket = socket
+
+ def recv(self):
+ # read the 4-byte length prefix, then the pickled payload
+ raw_data = self.recv_exact(4)
+ if raw_data is None:
+ raise NetworkException("Connection lost receiving header")
+ data_len, = struct.unpack('<I', raw_data)
+ raw_data = self.recv_exact(data_len)
+ if raw_data is None:
+ raise NetworkException("Connection lost receiving data")
+ return cPickle.loads(raw_data)
+
+ def recv_exact(self, num_bytes):
+ # socket.recv may return short reads; loop until num_bytes arrive
+ chunks = []
+ remaining = num_bytes
+ while remaining>0:
+ chunk = self.socket.recv(remaining)
+ if not chunk:
+ return None
+ chunks.append(chunk)
+ remaining -= len(chunk)
+ return ''.join(chunks)
+
+ def send(self, data):
+ raw_data = cPickle.dumps(data)
+ self.socket.sendall(struct.pack('<I', len(raw_data)))
+ self.socket.sendall(raw_data)
+
+ def local_address(self):
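+ # Fragile: scrapes /sbin/ifconfig output, assuming the address appears
+ # as "addr:x.x.x.x" in the second token of the second line.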
+ return commands.getoutput("/sbin/ifconfig").split("\n")[1].split()[1][5:]
+
+ def kill(self):
+ self.socket.shutdown(socket.SHUT_WR)
+ self.socket.close()
+
+class ServerHandle(NetworkEndpoint):
+ def __init__(self, server, socket, address):
+ NetworkEndpoint.__init__(self, socket)
+ self.address = address
+ self.header = None
+ self.server = server
+
+ def process_query(self):
+ print self.header
+ return ServerResponse.ok()
+
+ def handle(self):
+ try:
+ self.header = self.recv()
+ response = self.process_query()
+ if response is not None: self.send(response)
+
+ except NetworkException, e:
+ logging.error("Failed connection from %s: %s."%(repr(self.address), e))
+ self.socket.close()
+
+ except socket.error, (value,message):
+ logging.error("Failed connection from %s: %s."%(repr(self.address), message))
+ self.socket.close()
+
+class Server(object):
+ def __init__(self, handle_class=ServerHandle, addr='', port=7777):
+ self.server = gevent.server.StreamServer((addr, port), self.handle)
+ self.handle_class = handle_class
+
+ def serve(self):
+ self.server.serve_forever()
+
+ def handle(self, socket, address):
+ server_handle = self.handle_class(self, socket, address)
+ server_handle.handle()
+
+class Client(NetworkEndpoint):
+ def __init__(self, addr, port):
+ NetworkEndpoint.__init__(self, gevent.socket.create_connection((addr, port)))
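datanode.py and dfs.py iterate over FileIterable and SocketIterable, which are not defined anywhere in this commit; minimal generator sketches compatible with those call sites might be:

    import io

    def FileIterable(path, chunk_size=io.DEFAULT_BUFFER_SIZE):
        # yield successive chunks of the file at `path`
        with io.open(path, 'rb') as f:
            while True:
                chunk = f.read(chunk_size)
                if not chunk:
                    break
                yield chunk

    def SocketIterable(sock, size, chunk_size=io.DEFAULT_BUFFER_SIZE):
        # yield chunks from `sock` until exactly `size` bytes are consumed
        remaining = size
        while remaining > 0:
            data = sock.recv(min(chunk_size, remaining))
            if not data:
                raise IOError("Socket disconnected.")
            remaining -= len(data)
            yield data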
19 clusterdfs/processname.py
@@ -0,0 +1,19 @@
+import dl
+import sys
+import os.path
+
+libc_options = ['/lib/libc.so.6', '/lib/i386-linux-gnu/libc.so.6']
+
+def setprocname(name=None):
+ if name is None:
+ name = sys.argv[0].split('/')[-1]
+
+ for libc_path in libc_options:
+ if os.path.exists(libc_path):
+ try:
+ libc = dl.open(libc_path)
+ libc.call('prctl', 15, name, 0, 0, 0)
+ return True
+ except Exception:
+ return False
+ return False
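The dl module is Python 2 only and unavailable on 64-bit builds; an equivalent prctl(PR_SET_NAME) call can be made through ctypes (a sketch, not part of this commit):

    import ctypes

    def setprocname_ctypes(name):
        # 15 == PR_SET_NAME (linux/prctl.h); the kernel truncates the name
        libc = ctypes.CDLL(None)
        return libc.prctl(15, name, 0, 0, 0) == 0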
62 clusterdfs/test/xortest.py
@@ -0,0 +1,62 @@
+import time
+import random
+import struct
+import numpy
+
+total_time = 0
+size = 5000
+num_iters = 300
+
+a = bytearray(size)
+b = bytearray(size)
+c = bytearray(size)
+_a = numpy.ndarray(shape=(size), dtype='b')
+_b = numpy.ndarray(shape=(size), dtype='b')
+_c = numpy.ndarray(shape=(size), dtype='b')
+
+times = {'m1':0, 'm2':0, 'm3':0, 'm4':0, 'm5':0}
+
+for it in xrange(num_iters):
+ for i in xrange(size):
+ a[i] = random.randrange(0, 256)
+ b[i] = random.randrange(0, 256)
+ _a[i] = random.randrange(0, 256)
+ _b[i] = random.randrange(0, 256)
+
+ # method1:
+ init = time.time()
+ for i in xrange(size):
+ c[i] = a[i]^b[i]
+ times['m1'] += time.time() - init
+
+ # method2:
+ init = time.time()
+ offset = 0
+ for i in xrange(size/8):
+ ai, = struct.unpack_from('Q', buffer(a, offset, 8))
+ bi, = struct.unpack_from('Q', buffer(b, offset, 8))
+ struct.pack_into('Q', c, offset, ai^bi)
+ offset += 8
+ times['m2'] += time.time() - init
+
+ # method3:
+ init = time.time()
+ c = bytearray(numpy.array(a)^numpy.array(b))
+ times['m3'] += time.time() - init
+
+ # method4:
+ init = time.time()
+ r = numpy.array(a)^numpy.array(b)
+ for i in xrange(size):
+ c[i] = r[i]
+ times['m4'] += time.time() - init
+
+ # method5:
+ init = time.time()
+ _c = _a^_b
+ times['m5'] += time.time() - init
+
+for k in sorted(times):
+ print k, times[k]/num_iters
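The copies made by numpy.array() dominate methods 3 and 4; a zero-copy variant using numpy.frombuffer, which wraps the bytearray's memory instead of copying it, would be (sketch):

    import numpy

    va = numpy.frombuffer(a, dtype=numpy.uint8)  # shares memory with a
    vb = numpy.frombuffer(b, dtype=numpy.uint8)
    c = bytearray(va ^ vb)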
130 doc/Makefile
@@ -0,0 +1,130 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = build
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+ -rm -rf $(BUILDDIR)/*
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/cluster.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/cluster.qhc"
+
+devhelp:
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/cluster"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/cluster"
+ @echo "# devhelp"
+
+epub:
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ make -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
260 doc/source/conf.py
@@ -0,0 +1,260 @@
+# -*- coding: utf-8 -*-
+#
+# cluster documentation build configuration file, created by
+# sphinx-quickstart on Fri Jan 6 10:47:01 2012.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath('../../'))
+
+# -- General configuration -----------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.ifconfig']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'cluster'
+copyright = u'2012, Lluis Pamies-Juarez'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '0.0.1'
+# The full version, including alpha/beta/rc tags.
+release = '0.0.1'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = []
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'clusterdoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+ ('index', 'cluster.tex', u'cluster Documentation',
+ u'Lluis Pamies-Juarez', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output --------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ ('index', 'cluster', u'cluster Documentation',
+ [u'Lluis Pamies-Juarez'], 1)
+]
+
+
+# -- Options for Epub output ---------------------------------------------------
+
+# Bibliographic Dublin Core info.
+epub_title = u'cluster'
+epub_author = u'Lluis Pamies-Juarez'
+epub_publisher = u'Lluis Pamies-Juarez'
+epub_copyright = u'2012, Lluis Pamies-Juarez'
+
+# The language of the text. It defaults to the language option
+# or en if the language is not set.
+#epub_language = ''
+
+# The scheme of the identifier. Typical schemes are ISBN or URL.
+#epub_scheme = ''
+
+# The unique identifier of the text. This can be a ISBN number
+# or the project homepage.
+#epub_identifier = ''
+
+# A unique identification for the text.
+#epub_uid = ''
+
+# HTML files that should be inserted before the pages created by sphinx.
+# The format is a list of tuples containing the path and title.
+#epub_pre_files = []
+
+# HTML files shat should be inserted after the pages created by sphinx.
+# The format is a list of tuples containing the path and title.
+#epub_post_files = []
+
+# A list of files that should not be packed into the epub file.
+#epub_exclude_files = []
+
+# The depth of the table of contents in toc.ncx.
+#epub_tocdepth = 3
+
+# Allow duplicate toc entries.
+#epub_tocdup = True
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {'http://docs.python.org/': None}
33 doc/source/index.rst
@@ -0,0 +1,33 @@
+.. cluster documentation master file, created by
+ sphinx-quickstart on Fri Jan 6 10:47:01 2012.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+Welcome to ClusterDFS's documentation!
+======================================
+
+Contents:
+
+.. toctree::
+ :maxdepth: 2
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
+
+ClusterDFS's overview
+=====================
+
+In this section...
+
+The :mod:`clusterdfs` Module
+----------------------------
+
+.. automodule:: clusterdfs
+ :members:
+ :show-inheritance:
+
0  setup.py
No changes.