More feedback to the user during load & computing #10

Merged 2 commits on May 25, 2015
15 changes: 11 additions & 4 deletions lib/proposals/lpo.cpp
@@ -220,9 +220,10 @@ std::vector<TrainingParameters> filterParameters( const std::vector<TrainingPara
 void LPO::train(const std::vector< std::shared_ptr< LPOModelTrainer > >& trainers, int n_samples, const float f0) {
     static std::mt19937 rand;
     const int N_RANDOM = 100, NIT=10;
 
-    printf("%d training segments\n", n_samples );
+    printf("training segments (%d samples)\n", n_samples );
+    std::cout.flush();
 
     // Train the ensemble of models
     VectorXf current_best_accuracy = VectorXf::Zero( n_samples );

@@ -234,7 +235,13 @@ void LPO::train(const std::vector< std::shared_ptr< LPOModelTrainer > >& trainer
             exhaustive_id.push_back( i );
         else
             sampled_id.push_back( i );
 
+    if (VERBOSE)
+    {
+        printf(" =iteration_number [Method_name num_proposals (num_models) total = num_proposals]" \
+               "\t mean_best_accuracy n_prop*f0/n_samples\n");
+    }
+
     Timer timer;
     for( int it=0; it<NIT; it++ ) {
         timer.tic();
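The flush added above matters because printf output sits in the C stdout buffer until something forces it out; std::cout.flush() reaches that buffer only through the default sync_with_stdio coupling, and fflush(stdout) is the direct equivalent. A minimal standalone sketch of the pattern, not taken from the PR (the sample count is made up):

#include <chrono>
#include <cstdio>
#include <thread>

int main() {
    int n_samples = 5000;                                    // hypothetical count
    std::printf("training segments (%d samples)\n", n_samples);
    std::fflush(stdout);                                     // make the line visible before the long computation
    std::this_thread::sleep_for(std::chrono::seconds(2));    // stand-in for the training loop
    std::printf("done\n");
    return 0;
}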
44 changes: 40 additions & 4 deletions lib/python/dataset/voc.cpp
@@ -32,6 +32,8 @@
 #include <unordered_map>
 #include <rapidxml.hpp>
 
+#include <boost/progress.hpp>
+
 #define XSTR( x ) STR( x )
 #define STR( x ) std::string( #x )
 #ifdef VOC_DIR
@@ -87,20 +89,29 @@ static dict loadEntry( const std::string & name, bool load_seg = true, bool load
     sprintf( buf, (base_dir+VOC_OBJECT).c_str(), name.c_str() );
     RMatrixXus olbl = readIPNG16( buf );
     if( !olbl.diagonalSize() )
+    {
+        printf("Failed to read %s\n", buf);
         return dict();
+    }
     r["segmentation"] = cleanVOC(olbl);
 
     sprintf( buf, (base_dir+VOC_CLASS).c_str(), name.c_str() );
     RMatrixXus clbl = readIPNG16( buf );
     if( !clbl.diagonalSize() )
+    {
+        printf("Failed to read %s\n", buf);
         return dict();
+    }
     r["class"] = cleanVOC(clbl);
 }
 if (load_im) {
     sprintf( buf, (base_dir+VOC_IMAGES).c_str(), name.c_str() );
     std::shared_ptr<Image8u> im = imreadShared( buf );
     if( !im || im->empty() )
+    {
+        printf("Failed to read %s\n", buf);
         return dict();
+    }
     r["image"] = im;
 }
 sprintf( buf, (base_dir+VOC_ANNOT).c_str(), name.c_str() );
@@ -127,13 +138,35 @@ list loadVOC( bool train, bool valid, bool test ) {
     const std::string base_dir = voc_dir + "/VOC" + std::to_string(YEAR) + "/";
     bool read[3]={train,valid,test};
     list r;
-    for( int i=0; i<3; i++ )
-        if( read[i] ){
-            std::ifstream is(base_dir+VOC_INFO<YEAR,detect>::image_sets[i]);
-            if (!is.is_open()) {
+    for( int i=0; i<3; i++ )
+    {
+        if( read[i] ) {
+            const std::string filepath = base_dir+VOC_INFO<YEAR,detect>::image_sets[i];
+            std::ifstream is_for_count(filepath), is(filepath);
+            if (!is_for_count.is_open() or !is.is_open()) {
                 printf("File '%s' not found! Check if DATA_DIR is set properly.\n",(base_dir+VOC_INFO<YEAR,detect>::image_sets[i]).c_str());
                 throw std::invalid_argument("Failed to load dataset");
             }
+
+            const size_t lines_count = std::count(std::istreambuf_iterator<char>(is_for_count),
+                                                  std::istreambuf_iterator<char>(), '\n');
+            is_for_count.close();
+            switch(i)
+            {
+                case 0:
+                    printf("Loading Pascal VOC training data...\n");
+                    break;
+                case 1:
+                    printf("Loading Pascal VOC validation data...\n");
+                    break;
+                case 2:
+                default:
+                    printf("Loading Pascal VOC test data...\n");
+                    break;
+            }
+
+            boost::progress_display progress(lines_count);
+
             while(is.is_open() && !is.eof()) {
                 std::string l;
                 std::getline(is,l);
@@ -144,8 +177,11 @@ list loadVOC( bool train, bool valid, bool test ) {
                 else
                     printf("Failed to load image '%s'!\n",l.c_str());
                 }
+                progress += 1;
             }
+        }
     }
+    printf("Loading finished.\n");
     return r;
 }
 #define INST_YEAR(N) \
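The loader sizes its boost::progress_display by scanning the image-set file once and counting newlines before the real read. A self-contained sketch of the same two-pass idea using only the standard library (the file name is hypothetical, and a plain carriage-return counter stands in for boost::progress_display, whose header later Boost releases deprecate in favour of boost/timer/progress_display.hpp):

#include <algorithm>
#include <fstream>
#include <iostream>
#include <iterator>
#include <string>

int main() {
    const std::string filepath = "image_sets.txt";  // hypothetical list file, one entry per line
    std::ifstream counter(filepath);
    if (!counter.is_open()) return 1;
    const std::size_t lines = std::count(std::istreambuf_iterator<char>(counter),
                                         std::istreambuf_iterator<char>(), '\n');
    counter.close();

    std::ifstream is(filepath);                     // second stream for the actual pass
    std::size_t done = 0;
    for (std::string l; std::getline(is, l); ) {
        // ... load the entry named by l here ...
        std::cout << '\r' << ++done << '/' << lines << std::flush;  // simple inline progress
    }
    std::cout << "\nLoading finished.\n";
    return 0;
}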
7 changes: 7 additions & 0 deletions lib/python/segmentation.cpp
@@ -32,17 +32,24 @@
 #include "lpo.h"
 #include "util.h"
 #include <boost/python/suite/indexing/vector_indexing_suite.hpp>
+#include <boost/progress.hpp>
+
 
 template<typename BDetector>
 std::vector< std::shared_ptr<ImageOverSegmentation> > generateGeodesicKMeans1( const BDetector & det, const list & ims, int approx_N ) {
     const int N = len(ims);
+    printf("Computing geodesic K-means for %i images...\n", N);
     std::vector<Image8u*> img(N);
     for( int i=0; i<N; i++ )
         img[i] = extract<Image8u*>( ims[i] );
     std::vector< std::shared_ptr<ImageOverSegmentation> > ios( N );
+    boost::progress_display progress(N);
     #pragma omp parallel for
     for( int i=0; i<N; i++ )
+    {
         ios[i] = geodesicKMeans( *img[i], det, approx_N, 2 );
+        progress += 1; // not thread safe, but glitches in progress bar are inconsequential
+    }
     return ios;
 }
 BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS( ImageOverSegmentation_boundaryMap_overload, ImageOverSegmentation::boundaryMap, 0, 1 )
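The progress += 1 inside the OpenMP loop is a deliberate data race; as the comment says, a glitchy bar is harmless, but boost::progress_display itself is not thread safe. A race-free sketch using an atomic counter instead (work() is a hypothetical stand-in for geodesicKMeans):

#include <atomic>
#include <cstdio>
#include <vector>

static int work(int i) { return i * i; }   // hypothetical stand-in for the per-image job

int main() {
    const int N = 1000;
    std::vector<int> out(N);
    std::atomic<int> done{0};
    #pragma omp parallel for
    for (int i = 0; i < N; i++) {
        out[i] = work(i);
        const int d = ++done;              // atomic increment: no data race
        if (d % 100 == 0 || d == N)        // throttle output to every 100 items
            std::printf("%d/%d done\n", d, N);
    }
    return 0;
}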
10 changes: 10 additions & 0 deletions src/analyze_model.py
@@ -25,6 +25,9 @@
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 """
+
+from __future__ import print_function
+
 from pylab import *
 from lpo import *
 from util import *
@@ -44,6 +47,10 @@
 args = parser.parse_args()
 
 def evaluateDetailed( prop, over_segs, segmentations ):
+
+    print("Launching detailed evaluation, this might take a while...")
+    stdout.flush()
+
     from time import time
     BS = 100
     names = prop.modelTypes
@@ -97,7 +104,10 @@ def evaluateDetailed( prop, over_segs, segmentations ):
         bo[-1] = np.maximum( np.array(bo[m]), bo[-1] )
         t[-1] += t[m]
         ma[-1] /= ps[-1]
+
+    print( "name & pool size & % best & sqrt(median area) & time (see table 2 of paper)")
     for m,n in enumerate(names):
+        assert len(ma[m]) > 0
         print( names[m], '&', np.mean(ps[m]), '&', np.mean(bo[m]>=bbo)*100, '&', np.sqrt(np.mean(ma[m])), '&', t[m]/len(ma[m]) )


9 changes: 9 additions & 0 deletions src/eval_box.py
@@ -25,13 +25,20 @@
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 """
+
+from __future__ import print_function
+
 from lpo import *
 from util import *
 from sys import argv,stdout
 from pickle import dump
 import numpy as np
 
 def evaluateBox( prop, over_segs, boxes, name='', max_iou=0.9 ):
+
+    print("Launching evaluation, this might take a while...")
+    stdout.flush()
+
     bos, pool_ss = [],[]
     for i in range(0,len(over_segs),100):
         props = prop.propose( over_segs[i:i+100], max_iou, True ) # Use box_nms
@@ -40,6 +47,8 @@ def evaluateBox( prop, over_segs, boxes, name='', max_iou=0.9 ):
         pool_ss.append( pool_s )
     bo,pool_s = np.hstack( bos ),np.hstack( pool_ss )
     stdout.write('#prop = %0.3f ABO = %0.3f ARec = %0.3f\r'%(np.nanmean(pool_s),np.mean(bo),np.mean(2*np.maximum(bo-0.5,0))))
+
+    print("name & # prop. & ABO & 50%-recall & 70%-recall & 90%-recall & area recall (see table 3 of paper)")
     print( "LPO %05s & %d & %0.3f & %0.3f & %0.3f & %0.3f & %0.3f \\\\"%(name,np.nanmean(pool_s),np.mean(bo),np.mean(bo>=0.5), np.mean(bo>=0.7), np.mean(bo>=0.9), np.mean(2*np.maximum(bo-0.5,0)) ) )
     return bo,pool_s

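The quantities printed above come straight from the vector of per-object best overlaps bo: ABO is its mean, the k%-recalls are the fractions of objects reaching IoU k/100, and area recall averages 2*max(bo-0.5, 0), which climbs linearly from 0 at IoU 0.5 to 1 at perfect overlap. A toy computation of the same numbers (overlap values are made up):

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
    const std::vector<float> bo = {0.45f, 0.62f, 0.80f, 0.95f};  // hypothetical best overlaps
    float abo = 0, r50 = 0, r70 = 0, r90 = 0, arec = 0;
    for (float b : bo) {
        abo  += b;                            // accumulates mean best overlap
        r50  += b >= 0.5f;                    // counts objects recalled at IoU 0.5
        r70  += b >= 0.7f;
        r90  += b >= 0.9f;
        arec += 2.f * std::max(b - 0.5f, 0.f);  // area-recall ramp
    }
    const float n = bo.size();
    std::printf("ABO %.3f  50%%-recall %.3f  70%%-recall %.3f  90%%-recall %.3f  area recall %.3f\n",
                abo / n, r50 / n, r70 / n, r90 / n, arec / n);
    return 0;
}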
10 changes: 10 additions & 0 deletions src/train_lpo.py
@@ -25,6 +25,9 @@
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 """
+
+from __future__ import print_function
+
 from lpo import *
 from util import *
 import numpy as np
@@ -111,6 +114,8 @@ def evaluateBox( prop, over_segs, boxes, name='', bos=None, pool_ss=None, max_io
 prop.addGBS("hsv",[50,100,150,200,350,600],1000)
 
 print( "Training", f0 )
+stdout.flush()
+
 if args.box:
     # Compute the boxes for each of the segmentations
     boxes = [proposals.Proposals(s,np.eye(np.max(s)+1).astype(bool)).toBoxes() for s in segmentations]
@@ -137,11 +142,16 @@ def evaluateBox( prop, over_segs, boxes, name='', bos=None, pool_ss=None, max_io
         over_segs,segmentations,boxes,names = loadVOCAndOverSeg( 'test', detector='mssf', year='2012_detect' )
     else:
         over_segs,segmentations,boxes,names = loadVOCAndOverSeg( 'test', detector='mssf' )
+
+    print( "Evaluating Pascal test data" )
+    stdout.flush()
     if args.box:
         all_bos,all_pool_ss = evaluateBox( prop, over_segs, boxes, name='(tst)', max_iou=args.iou )
     else:
         all_bos,all_pool_ss = evaluate( prop, over_segs, segmentations, name='(tst)', max_iou=args.iou )
 elif args.dataset.lower() == 'coco':
+    print( "Loading and evaluating Coco test data" )
+    stdout.flush()
     all_bos,all_pool_ss = [],[]
     for n in range(dataset.cocoNFolds()):
         over_segs,segmentations,boxes,names = loadCOCOAndOverSeg( 'test', detector='mssf', fold=n )
28 changes: 20 additions & 8 deletions src/util.py
@@ -25,6 +25,9 @@
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 """
+
+from __future__ import print_function
+
 from sys import stdout
 
 try:
@@ -49,14 +52,15 @@ def getDetector( detector="sf" ):
         r = contour.DirectedSobel()
     return r
 
-def loadAndOverSegDataset( loader, name, detector="sf", N_SPIX=1000 ):
+def loadAndOverSegDataset( loader, name, detector_name="sf", N_SPIX=1000 ):
     import numpy as np
     from pickle import dumps,loads
     from lpo import segmentation
     from tempfile import gettempdir
-    FILE_NAME = '/%s/%s_%s_%d.dat'%(gettempdir(),name,detector,N_SPIX)
+    FILE_NAME = '/%s/%s_%s_%d.dat'%(gettempdir(),name,detector_name,N_SPIX)
     try:
         with open(FILE_NAME,'rb') as f:
+            print("Loading cache data from", FILE_NAME)
             over_segs,segmentations,boxes,names = loads( f.read() )
             f.close()
             over_seg = segmentation.VecImageOverSegmentation()
@@ -77,25 +81,33 @@ def loadAndOverSegDataset( loader, name, detector="sf", N_SPIX=1000 ):
     boxes = [e['boxes'] for e in data if 'boxes' in e]
 
     # Do the over-segmentation
-    detector = getDetector()
+    detector = getDetector(detector=detector_name)
 
     if detector != None:
         over_segs = segmentation.generateGeodesicKMeans( detector, images, N_SPIX )
 
     del data # we free the memory used by the images
 
+    print("Saving oversegmentation in", FILE_NAME,
+          "(this might take a while)", end="...")
+    stdout.flush()
     with open(FILE_NAME,'wb') as f:
-        f.write( dumps( ([compress(i) for i in over_segs],[compress(i) for i in segmentations],[compress(i) for i in boxes], names) ) )
+        # this section will require lots of memory
+        f.write( dumps( ([compress(i) for i in over_segs],
+                         [compress(i) for i in segmentations],
+                         [compress(i) for i in boxes], names) ) )
         f.close()
 
+    print("done.")
     return over_segs,segmentations,boxes,names
 
 def loadCOCOAndOverSeg( im_set="test", detector="sf", N_SPIX=1000, fold=0 ):
     from lpo import dataset
-    return loadAndOverSegDataset( lambda: dataset.loadCOCO2014(im_set=="train",im_set=="test",fold), "COCO_%s_%d"%(im_set,fold), detector=detector, N_SPIX=N_SPIX )
+    return loadAndOverSegDataset( lambda: dataset.loadCOCO2014(im_set=="train",im_set=="test",fold), "COCO_%s_%d"%(im_set,fold), detector_name=detector, N_SPIX=N_SPIX )
 
 def loadVOCAndOverSeg( im_set="test", detector="sf", N_SPIX=1000, year="2012" ):
     from lpo import dataset
     ldr = eval("dataset.loadVOC%s"%year)
-    return loadAndOverSegDataset( lambda: ldr(im_set=="train",im_set=="valid",im_set=="test"), "VOC%s_%s"%(year,im_set), detector=detector, N_SPIX=N_SPIX )
+    return loadAndOverSegDataset( lambda: ldr(im_set=="train",im_set=="valid",im_set=="test"), "VOC%s_%s"%(year,im_set), detector_name=detector, N_SPIX=N_SPIX )
 
 def saveProposalsHDF5( p, fn ):
     import h5py
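The util.py hunk fixes a shadowing bug as much as it adds feedback: the detector parameter used to be overwritten by detector = getDetector(), so the caller's choice never reached getDetector even though it was baked into the cache file name. The rename to detector_name keeps the cache key and the computation in sync. A sketch of that parameter-keyed cache idea (paths and names here are hypothetical):

#include <cstdio>
#include <fstream>
#include <string>

// Every parameter that changes the result appears in the cache path;
// otherwise a cached result for one detector is silently reused for another.
static std::string cachePath(const std::string& dataset,
                             const std::string& detector_name, int n_spix) {
    char buf[256];
    std::snprintf(buf, sizeof(buf), "/tmp/%s_%s_%d.dat",
                  dataset.c_str(), detector_name.c_str(), n_spix);
    return std::string(buf);
}

int main() {
    const std::string path = cachePath("VOC2012_train", "mssf", 1000);
    std::ifstream cached(path);
    if (cached.good()) {
        std::printf("Loading cache data from %s\n", path.c_str());
    } else {
        std::printf("Saving oversegmentation in %s (this might take a while)...\n", path.c_str());
        std::ofstream(path) << "placeholder";  // real code would serialize the results here
    }
    return 0;
}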