Ubuntuone API changes #11

Open · wants to merge 8 commits
@@ -64,6 +64,18 @@ up an observer thread and schedule an event stream::
stream = Stream(callback, path)
observer.schedule(stream)
+The latency used by the observer can be adjusted to the requirements of your application. This is done by passing the ``latency`` keyword argument to the Observer::
+
+ from fsevents import Observer
+ observer = Observer(latency=0) # default value is 0.01
+ observer.start()
+
+You can also tell the observer to process events as soon as possible, rather than waiting for all the streams to be ready::
+
+ from fsevents import Observer
+ observer = Observer(process_asap=True) # default value is False
+ observer.start()
+
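Taken together, a minimal illustrative sketch (not part of this diff) combining both options with the observer/stream setup shown earlier in this README, assuming a ``callback(path, mask)`` function and an example path to watch::

    from fsevents import Observer, Stream

    def callback(path, mask):
        print(path, mask)

    observer = Observer(latency=0, process_asap=True)
    observer.start()

    stream = Stream(callback, '/tmp')  # '/tmp' is just an example path
    observer.schedule(stream)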
Streams can observe any number of paths; simply pass them as
positional arguments (or using the ``*`` operator)::
@@ -101,16 +101,25 @@ static PyObject* pyfsevents_loop(PyObject* self, PyObject* args) {
return Py_None;
}
-static PyObject* pyfsevents_schedule(PyObject* self, PyObject* args) {
+static PyObject* pyfsevents_schedule(PyObject* self, PyObject* args,
+ PyObject *keywds) {
PyObject* thread;
PyObject* stream;
PyObject* paths;
PyObject* callback;
+ PyObject* show_file_events;
- if (!PyArg_ParseTuple(args, "OOOO:schedule",
- &thread, &stream, &callback, &paths))
- return NULL;
+ // default latency to be used.
+ double latency = 0.01;
+
+ static char *kwlist[] = {"thread", "stream", "callback", "paths",
+ "show_file_events", "latency", NULL};
+ if (!PyArg_ParseTupleAndKeywords(args, keywds, "OOOOO|d:schedule", kwlist,
+ &thread, &stream, &callback, &paths,
+ &show_file_events, &latency))
+ return NULL;
+
/* stream must not already have been scheduled */
if (PyDict_Contains(streams, stream) == 1) {
return NULL;
@@ -142,13 +151,19 @@ static PyObject* pyfsevents_schedule(PyObject* self, PyObject* args) {
/* create event stream */
FSEventStreamContext context = {0, (void*) info, NULL, NULL, NULL};
FSEventStreamRef fsstream = NULL;
+
+ UInt32 flags = kFSEventStreamCreateFlagNoDefer;
+ if(show_file_events == Py_True){
+ flags = flags | kFSEventStreamCreateFlagFileEvents;
+ }
+
fsstream = FSEventStreamCreate(kCFAllocatorDefault,
(FSEventStreamCallback)&handler,
&context,
cfArray,
kFSEventStreamEventIdSinceNow,
- 0.01, // latency
- kFSEventStreamCreateFlagNoDefer);
+ latency,
+ flags);
CFRelease(cfArray);
PyObject* value = PyCObject_FromVoidPtr((void*) fsstream, PyMem_Free);
@@ -212,9 +227,9 @@ static PyObject* pyfsevents_stop(PyObject* self, PyObject* thread) {
static PyMethodDef methods[] = {
{"loop", pyfsevents_loop, METH_VARARGS, NULL},
{"stop", pyfsevents_stop, METH_O, NULL},
- {"schedule", pyfsevents_schedule, METH_VARARGS, NULL},
+ {"schedule", (PyCFunction) pyfsevents_schedule, METH_VARARGS | METH_KEYWORDS, NULL},
{"unschedule", pyfsevents_unschedule, METH_O, NULL},
- {NULL},
+ {NULL, NULL, 0, NULL},
};
static char doc[] = "Low-level FSEvent interface.";
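For context, an editorial sketch (not part of this diff): with ``METH_KEYWORDS`` and the ``"OOOOO|d:schedule"`` format string, the low-level entry point now takes five required objects plus an optional ``latency`` double. A hypothetical thin Python wrapper around the call, mirroring how ``fsevents.py`` invokes it in the next file::

    import _fsevents  # compiled extension built from _fsevents.c (macOS only)

    def schedule_stream(observer_thread, stream, callback, paths,
                        show_file_events=False, latency=0.01):
        # The five positional objects match the "OOOOO" part of the format
        # string; latency fills the optional "|d" keyword argument.
        _fsevents.schedule(observer_thread, stream, callback, paths,
                           show_file_events, latency=latency)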
@@ -1,3 +1,4 @@
+import logging
import os
import threading
import _fsevents
@@ -10,11 +11,33 @@
IN_MOVED_FROM = 0x00000040
IN_MOVED_TO = 0x00000080
+# flags from FSEvents to match event types:
+FSE_CREATED_FLAG = 0x0100
+FSE_MODIFIED_FLAG = 0x1000
+FSE_REMOVED_FLAG = 0x0200
+FSE_RENAMED_FLAG = 0x0800
+
+
+# logging setup
+def logger_init():
+ log = logging.getLogger("fsevents")
+ console_handler = logging.StreamHandler()
+ console_handler.setFormatter(
+ logging.Formatter("[%(asctime)s %(name)s %(levelname)s] %(message)s"))
+ log.addHandler(console_handler)
+ log.setLevel(logging.INFO)
+ return log
+
+log = logger_init()
+
+
class Observer(threading.Thread):
event = None
runloop = None
- def __init__(self):
+ def __init__(self, latency=0.01, process_asap=False):
+ self.process_asap = process_asap
+ self.latency = latency
self.streams = set()
self.schedulings = {}
self.lock = threading.Lock()
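As an aside (not part of this diff), the raw FSEvents flag bits defined above can be checked with plain bitmasking; a sketch with a hypothetical helper, assuming the constants remain exported at module level::

    from fsevents import (FSE_CREATED_FLAG, FSE_MODIFIED_FLAG,
                          FSE_REMOVED_FLAG, FSE_RENAMED_FLAG)

    def describe_raw_mask(mask):
        # Hypothetical helper: several bits may be set in a single mask.
        names = []
        if mask & FSE_CREATED_FLAG:
            names.append('created')
        if mask & FSE_MODIFIED_FLAG:
            names.append('modified')
        if mask & FSE_REMOVED_FLAG:
            names.append('removed')
        if mask & FSE_RENAMED_FLAG:
            names.append('renamed')
        return names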
@@ -45,28 +68,47 @@ def run(self):
def _schedule(self, stream):
if not stream.paths:
- raise ValueError("No paths to observe.")
+ msg = "No paths to observe."
+ log.error(msg)
+ raise ValueError(msg)
+
if stream.file_events:
callback = FileEventCallback(stream.callback, stream.paths)
else:
def callback(paths, masks):
for path, mask in zip(paths, masks):
stream.callback(path, mask)
- _fsevents.schedule(self, stream, callback, stream.paths)
+ _fsevents.schedule(self, stream, callback, stream.paths,
+ stream.raw_file_events, latency=self.latency)
def schedule(self, stream):
- self.lock.acquire()
- try:
+
+ def schedule_callback():
if self.streams is None:
self._schedule(stream)
elif stream in self.streams:
- raise ValueError("Stream already scheduled.")
+ msg = "Stream already scheduled."
+ log.error(msg)
+ raise ValueError(msg)
else:
self.streams.add(stream)
if self.event is not None:
self.event.set()
- finally:
- self.lock.release()
+ if self.process_asap and self.is_alive():
+ while self.streams is not None:
+ pass
+
+ # decide if we want to block and therefore listen for events before all
+ # streams have been added or do the opposite
+ if self.process_asap:
alecu (Aug 29, 2012): this does not look safe at all

mandel-macaque (Aug 29, 2012): We should ask gatox why he did this...
+ log.debug('Processing events asap')
+ schedule_callback()
+ else:
+ self.lock.acquire()
+ try:
+ schedule_callback()
+ finally:
+ self.lock.release()
def unschedule(self, stream):
self.lock.acquire()
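Regarding the reviewers' concern above, an editorial sketch (not part of this diff): the busy-wait on ``self.streams`` could be replaced with a ``threading.Event`` that the run loop sets once pending streams have been picked up, so the caller blocks without spinning. All names here are hypothetical::

    import threading

    # Event the run loop would set after draining the pending streams.
    streams_drained = threading.Event()

    def schedule_and_wait(schedule_callback, timeout=5.0):
        streams_drained.clear()
        schedule_callback()            # queue the stream for the run loop
        streams_drained.wait(timeout)  # blocks instead of busy-waiting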
@@ -86,9 +128,11 @@ def stop(self):
self.event = None
event.set()
+
class Stream(object):
def __init__(self, callback, *paths, **options):
file_events = options.pop('file_events', False)
+ raw_file_events = options.pop('raw_file_events', False)
assert len(options) == 0, "Invalid option(s): %s" % repr(options.keys())
for path in paths:
if not isinstance(path, str):
@@ -98,6 +142,8 @@ def __init__(self, callback, *paths, **options):
self.callback = callback
self.paths = list(paths)
self.file_events = file_events
+ self.raw_file_events = raw_file_events
+
class FileEvent(object):
__slots__ = 'mask', 'cookie', 'name'
@@ -110,6 +156,7 @@ def __init__(self, mask, cookie, name):
def __repr__(self):
return repr((self.mask, self.cookie, self.name))
+
class FileEventCallback(object):
def __init__(self, callback, paths):
self.snapshots = {}
@@ -122,7 +169,9 @@ def __call__(self, paths, masks):
events = []
deleted = {}
- for path in sorted(paths):
+ paths_masks = zip(paths, masks)
+ log.debug('Processing paths with masks:%s', paths_masks)
+ for path, mask in sorted(paths_masks):
path = path.rstrip('/')
snapshot = self.snapshots[path]
@@ -138,21 +187,32 @@ def __call__(self, paths, masks):
pass
observed = set(current)
-
for name, snap_stat in snapshot.items():
filename = os.path.join(path, name)
if name in observed:
+ log.debug('File "%s" is observed', name)
stat = current[name]
- if stat.st_mtime > snap_stat.st_mtime:
- events.append(FileEvent(IN_MODIFY, None, filename))
- elif stat.st_ctime > snap_stat.st_ctime:
- events.append(FileEvent(IN_ATTRIB, None, filename))
+ if stat.st_mtime != snap_stat.st_mtime:
+ event = FileEvent(IN_MODIFY, None, filename)
+ log.debug('Appending event "%s"', event)
+ events.append(event)
+ if not mask & FSE_MODIFIED_FLAG:
+ log.debug("No matching flag for detected modify")
+ elif stat.st_ctime != snap_stat.st_ctime:
+ event = FileEvent(IN_ATTRIB, None, filename)
+ log.debug('Appending event "%s"', event)
+ events.append(event)
observed.discard(name)
else:
event = FileEvent(IN_DELETE, None, filename)
deleted[snap_stat.st_ino] = event
+ log.debug('Appending event "%s"', event)
events.append(event)
+ if ((not mask & FSE_REMOVED_FLAG) and
+ (not mask & FSE_RENAMED_FLAG)):
+ log.debug("delete detected with no "
+ "delete or rename flag")
for name in observed:
stat = current[name]
@@ -163,15 +223,31 @@ def __call__(self, paths, masks):
self.cookie += 1
event.mask = IN_MOVED_FROM
event.cookie = self.cookie
- event = FileEvent(IN_MOVED_TO, self.cookie, filename)
+ moved_to_event = FileEvent(IN_MOVED_TO, self.cookie,
+ filename)
+ log.debug('Appending event "%s"', moved_to_event)
+ events.append(moved_to_event)
+ if not mask & FSE_RENAMED_FLAG:
+ log.debug('Rename detected without matching flag')
else:
- event = FileEvent(IN_CREATE, None, filename)
+ in_create_event = FileEvent(IN_CREATE, None, filename)
+ log.debug('Appending event "%s"', in_create_event)
+ events.append(in_create_event)
+ modified_event = FileEvent(IN_MODIFY, None, filename)
+ log.debug('Appending event "%s"', modified_event)
+ events.append(modified_event)
+
+ if not mask & FSE_MODIFIED_FLAG:
+ log.debug('Adding IN_MODIFY event when the flag was'
+ ' missing. Possible reason was a copy.')
+
+ if not mask & FSE_CREATED_FLAG:
+ log.debug("Create detected from snapshot"
+ "but event is not marked as create")
if os.path.isdir(filename):
self.snapshot(filename)
- events.append(event)
-
snapshot.clear()
snapshot.update(current)
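Putting the Python-side changes together, an illustrative sketch (not part of this diff): a stream that asks FSEvents for per-file events while keeping the high-level ``FileEvent`` callback::

    from fsevents import Observer, Stream

    def on_file_event(event):
        # With file_events=True the callback is expected to receive
        # FileEvent objects carrying .mask (an IN_* constant), .cookie
        # and .name.
        print(event)

    observer = Observer(latency=0.01, process_asap=False)
    observer.start()

    stream = Stream(on_file_event, '/tmp',  # '/tmp' is just an example path
                    file_events=True,       # wrap callback in FileEventCallback
                    raw_file_events=True)   # forwarded as show_file_events to C
    observer.schedule(stream)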