Permalink
Browse files

Improved tree filter; new find-in-project search feature

  • Loading branch information...
1 parent e73b69e commit f73ec13194077d1ac6fd90ddc65d9983646d9c63 @lusob committed Nov 27, 2011
Showing 3 changed files with 81 additions and 71 deletions.
  1. +9 −8 djide/static/js/ui-ide.js
  2. +14 −4 djide/views.py
  3. +58 −59 djide/whooshlib.py
View
17 djide/static/js/ui-ide.js 100755 → 100644
@@ -69,7 +69,7 @@ Ext.onReady(function(){
var animate = this.tree.animate;
this.tree.animate = false;
- this.tree.expandAll();
+ this.tree.expand(startNode);
this.tree.animate = animate;
Ext.ux.tree.TreeFilterX.superclass.filter.apply(this, arguments);
@@ -183,11 +183,11 @@ Ext.onReady(function(){
,enableKeyEvents:true
,listeners:{
keyup:{buffer:150, fn:function(field, e) {
- if(Ext.EventObject.ESC == e.getKey()) {
+ var val = this.getRawValue();
+ if(Ext.EventObject.ESC == e.getKey() || !val.match(/\S/)) {
field.onTriggerClick();
}
else {
- var val = this.getRawValue();
var re = new RegExp('.*' + val + '.*', 'i');
tree.filter.clear();
tree.filter.filter(re, 'text');
@@ -450,7 +450,7 @@ Ext.onReady(function(){
iconCls: 'icon-grid',
listeners: {
'rowclick': function( grid, rowIndex, e ) {
- var fileFullPath = 'workingcopies/'+appname +"/"+ grid.store.getAt(rowIndex).get('location');
+ var fileFullPath = grid.store.getAt(rowIndex).get('location');
syncEditor(fileFullPath);
}
}
@@ -589,12 +589,13 @@ Ext.onReady(function(){
aEditors[tabs.getActiveTab().id].reindentSelection();
}
function onButtonFindClick(){
+ /*
Ext.Msg.show({
title:'Feature not available',
msg: 'This feature is pending to do in next releases.',
- });
-
- /*
+ });
+ */
+
Ext.Msg.prompt('Keywords', 'Please enter text to find:', function(btn, keywords){
if (btn == 'ok' && !isEmpty(keywords)){
Ext.getBody().mask('Searching...', 'x-mask-loading');
@@ -613,7 +614,7 @@ Ext.onReady(function(){
});
}
});
- */
+
}
function onStyleChange(item, checked){
if (checked)
View
@@ -30,7 +30,6 @@ def model_editor(request):
app_name = request.POST.get('app_name')
modPath = __import__(app_name).__path__[0]
projectRoot = modPath.rstrip(app_name)
-
response=[]
if(request.POST.get('cmd') == 'getMeta'):
@@ -67,6 +66,16 @@ def model_editor(request):
dataPost = request.POST.get('data').decode('string_escape')
setDataFile(request.POST.get('path'), dataPost, projectRoot)
return HttpResponse("")
+ elif(request.POST.get('cmd') == 'find'):
+ keywords = request.POST.get('keywords');
+ if(keywords and len(keywords)>0):
+ import whooshlib
+ index_path_file = os.path.join(IDE_PATH,'metafiles/.%s_index' % urllib.unquote_plus(app_name))
+ whooshlib.index_my_docs(projectRoot, index_path_file)
+ arrResults = []
+ for hit in whooshlib.find(index_path_file, keywords):
+ arrResults.append([hit.highlights("content"), hit["path"]])
+ return HttpResponse(json.dumps(arrResults) if len(arrResults)>0 else json.dumps([['No results found','']]))
else:
return HttpResponse("")
@@ -97,13 +106,14 @@ def setDataFile(id, fileContent, rootPath):
fullPathName = os.path.join(rootPath, urllib.unquote_plus(id))
handle = open(fullPathName, 'w')
handle.write(fileContent)
- except IOError:
- return HttpResponse('File exception (%s)'%urllib.unquote_plus(fileName))
- finally:
handle.close()
+ except IOError:
+ return HttpResponse('File exception (%s)'%urllib.unquote_plus(id))
+
def getDataFile(id, rootPath):
try:
return open(os.path.join(rootPath, urllib.unquote_plus(id))).read()
except IOError:
return HttpResponse('File exception (%s)'%id)
+
View
@@ -1,6 +1,5 @@
'''
-Es fichero es un conjunto de funciones parap indexar y busca dentro de los ficheros
-python de un directorio
+Library to fulltext search inside python files
Example of use
import whooshlib
@@ -15,41 +14,43 @@
from whoosh import index
from whoosh.fields import Schema, ID, TEXT, STORED
-def clean_index(dirname):
- # Always create the index from scratch
- ix = index.create_in(dirname, schema=get_schema())
- writer = ix.writer()
+def clean_index(dirname, index_dir, index_name):
+ # Always create the index from scratch
+ ix = index.create_in(index_dir, indexname=index_name, schema=get_schema())
+ writer = ix.writer()
- # Assume we have a function that gathers the filenames of the
- # documents to be indexed
- for root, dirnames, filenames in os.walk(dirname):
- for filename in fnmatch.filter(filenames, '*.py'):
- py_file = (os.path.join(root, filename))
- add_doc(writer, py_file)
- writer.commit()
+ # Assume we have a function that gathers the filenames of the
+ # documents to be indexed
+ for root, dirnames, filenames in os.walk(dirname):
+ for filename in fnmatch.filter(filenames, '*.py'):
+ py_file = (os.path.join(root, filename))
+ add_doc(writer, py_file)
+ writer.commit()
def get_schema():
- return Schema(path=ID(unique=True, stored=True), time=STORED, content=TEXT(stored=True))
+ return Schema(path=ID(unique=True, stored=True), time=STORED, content=TEXT(stored=True))
def add_doc(writer, path):
- fileobj=open(path, "rb")
- content=fileobj.read()
- fileobj.close()
- modtime = os.path.getmtime(path)
- writer.add_document(path=path, content=unicode(content, errors='ignore'), time=modtime)
-
-
-def index_my_docs(dirname, clean=False):
- if clean:
- clean_index(dirname)
- else:
- try:
- incremental_index(dirname)
- except:
- clean_index(dirname)
+ fileobj=open(path, "rb")
+ content=fileobj.read()
+ fileobj.close()
+ modtime = os.path.getmtime(path)
+ writer.add_document(path=path, content=unicode(content, errors='ignore'), time=modtime)
+
+
+def index_my_docs(dirname, index_path_file, clean=False):
+ index_name = os.path.basename(index_path_file)
+ index_dir = os.path.dirname(index_path_file)
+ if clean:
+ clean_index(dirname, index_dir, index_name)
+ else:
+ try:
+ incremental_index(dirname, index_dir, index_name)
+ except:
+ clean_index(dirname, index_dir, index_name)
-def incremental_index(dirname):
- ix = index.open_dir(dirname)
+def incremental_index(dirname, index_dir, index_name):
+ ix = index.open_dir( index_dir, indexname=index_name)
searcher = ix.searcher()
# The set of all paths in the index
@@ -61,43 +62,41 @@ def incremental_index(dirname):
# Loop over the stored fields in the index
for fields in searcher.all_stored_fields():
- indexed_path = fields['path']
- indexed_paths.add(indexed_path)
-
- if not os.path.exists(indexed_path):
- # This file was deleted since it was indexed
- writer.delete_by_term('path', indexed_path)
-
- else:
- # Check if this file was changed since it
- # was indexed
- indexed_time = fields['time']
- mtime = os.path.getmtime(indexed_path)
- if mtime > indexed_time:
- # The file has changed, delete it and add it to the list of
- # files to reindex
- writer.delete_by_term('path', indexed_path)
- to_index.add(indexed_path)
+ indexed_path = fields['path']
+ indexed_paths.add(indexed_path)
+
+ if not os.path.exists(indexed_path):
+ # This file was deleted since it was indexed
+ writer.delete_by_term('path', indexed_path)
+ else:
+ # Check if this file was changed since it
+ # was indexed
+ indexed_time = fields['time']
+ mtime = os.path.getmtime(indexed_path)
+ if mtime > indexed_time:
+ # The file has changed, delete it and add it to the list of
+ # files to reindex
+ writer.delete_by_term('path', indexed_path)
+ to_index.add(indexed_path)
# Loop over the files in the filesystem
# Assume we have a function that gathers the filenames of the
# documents to be indexed
for root, dirnames, filenames in os.walk(dirname):
- for filename in fnmatch.filter(filenames, '*.py'):
- py_file = (os.path.join(root, filename))
- if py_file in to_index or py_file not in indexed_paths:
- # This is either a file that's changed, or a new file
- # that wasn't indexed before. So index it!
- add_doc(writer, py_file)
-
+ for filename in fnmatch.filter(filenames, '*.py'):
+ py_file = (os.path.join(root, filename))
+ if py_file in to_index or py_file not in indexed_paths:
+ # This is either a file that's changed, or a new file
+ # that wasn't indexed before. So index it!
+ add_doc(writer, py_file)
writer.commit()
-def find(dirname, q):
+def find(index_path_file, q):
+ index_name = os.path.basename(index_path_file)
+ index_dir = os.path.dirname(index_path_file)
from whoosh.qparser import QueryParser
- ix = index.open_dir(dirname)
+ ix = index.open_dir(index_dir, indexname=index_name)
s=ix.searcher()
qp = QueryParser("content", schema=ix.schema)
p = qp.parse(unicode(q))
return s.search(p, limit=None)
-
-

0 comments on commit f73ec13

Please sign in to comment.