Permalink
Browse files

Merge branch 'v1' of github.com:quantombone/exemplarsvm

  • Loading branch information...
2 parents 1acc55a + 1390b9d commit 46069c26238b6d7e4e4dd7f1c595d0fe781304f3 @quantombone committed Jan 8, 2012
Showing with 2,616 additions and 10,382 deletions.
  1. +0 −1 .gitignore
  2. +20 −0 COPYING
  3. +182 −27 README.md
  4. +2 −1 VOCcode/VOCevaldet.m
  5. +0 −215 apply_all_exemplars.m
  6. +0 −13 debug_demos/README.md
  7. +0 −23 debug_demos/count_windows.m
  8. +0 −48 debug_demos/demo_datasets.m
  9. +0 −94 debug_demos/demo_nonlinear.m
  10. +0 −52 debug_demos/demo_random_svms.m
  11. +0 −17 debug_demos/test_times.m
  12. +0 −66 demos/README.md
  13. +0 −50 demos/get_voc_dataset.m
  14. +0 −8 demos/load_data_directory.m
  15. +0 −111 demos/old.demos/draw_memex.m
  16. +0 −49 demos/old.demos/dump_memex_icons.m
  17. +0 −89 demos/old.demos/plot_size2_plot.m
  18. +0 −119 demos/old.demos/plot_size3_plot.m
  19. +0 −114 demos/old.demos/plot_size_plot.m
  20. +0 −276 demos/old.demos/plot_voc_results.m
  21. +0 −253 demos/old.demos/plot_voc_results3.m
  22. +0 −293 demos/old.demos/plot_voc_results_horiz.m
  23. +0 −112 demos/old.demos/video_scene.m
  24. +0 −155 demos/old.demos/voc_demo_dfun.m
  25. +0 −157 demos/old.demos/voc_demo_dumpfigs.m
  26. +0 −158 demos/old.demos/voc_demo_esvm_1000neg.m
  27. +0 −142 demos/old.demos/voc_demo_esvm_2010.m
  28. +0 −146 demos/old.demos/voc_demo_esvm_2010_full.m
  29. +0 −165 demos/old.demos/voc_demo_esvm_250neg.m
  30. +0 −169 demos/old.demos/voc_demo_esvm_25neg.m
  31. +0 −164 demos/old.demos/voc_demo_esvm_icons.m
  32. +0 −165 demos/old.demos/voc_demo_esvm_large.m
  33. +0 −166 demos/old.demos/voc_demo_esvm_small.m
  34. +0 −165 demos/old.demos/voc_demo_esvm_tiny.m
  35. +0 −150 demos/old.demos/voc_demo_nn.m
  36. +0 −148 demos/old.demos/voc_demo_nn2.m
  37. +0 −151 demos/old.demos/voc_demo_nocat.m
  38. +0 −142 demos/old.demos/voc_demo_segall.m
  39. +0 −168 demos/old.demos/voc_demo_svs.m
  40. +0 −169 demos/old.demos/voc_demo_tophits.m
  41. +0 −147 demos/old.demos/voc_memex.m
  42. +0 −259 demos/old.demos/voc_template_dumpfigs.m
  43. +0 −256 demos/old.demos/voc_template_memex.m
  44. +0 −263 demos/old.demos/voc_template_tophits.m
  45. +0 −125 demos/voc_demo_esvm.m
  46. +0 −298 demos/voc_template.m
  47. +0 −173 do_svm.m
  48. +69 −46 iccv11/pool_exemplar_detections.m → esvm_apply_calibration.m
  49. +95 −0 esvm_demo_apply_exemplars.m
  50. +148 −0 esvm_demo_train_fast.m
  51. +122 −142 localizemeHOG.m → esvm_detect.m
  52. +255 −0 esvm_detect_imageset.m
  53. +14 −0 esvm_download_models.m
  54. +81 −56 get_default_mining_params.m → esvm_get_default_params.m
  55. +73 −21 exemplar_initialize.m → esvm_initialize_exemplars.m
  56. +28 −0 esvm_perform_calibration.m
  57. +12 −0 esvm_quick_demo.m
  58. +109 −0 esvm_script_train_voc_class.m
  59. +74 −36 iccv11/show_top_dets.m → esvm_show_top_dets.m
  60. +14 −14 iccv11/show_hits_figure_iccv.m → esvm_show_transfer_figure.m
  61. +21 −0 esvm_subset_of_models.m
  62. +241 −0 esvm_train_exemplars.m
  63. +7 −0 esvm_update_voc_models.m
  64. +1 −1 features/HOGpicture.m
  65. +1 −1 features/README.md
  66. +0 −11 features/compile.m
  67. BIN features/fconvblas.mexmaci64
  68. +18 −12 features/featpyramid2.m
  69. +18 −0 features/features_compile.m
  70. BIN features/features_pedro.mexmaci64
  71. BIN features/features_raw.mexmaci64
  72. +7 −11 features/resize.cc
  73. BIN features/resize.mexmaci64
  74. +0 −119 followup/demo_regtrain.m
  75. +0 −178 followup/demo_vreg.m
  76. +0 −295 followup/demo_vreg2.m
  77. +0 −92 followup/kp_graph.m
  78. +0 −5 get_default_mining_params_scene.m
  79. +0 −40 get_dominant_basis.m
  80. +0 −97 get_pascal_stream.m
  81. +0 −3 gps/README.md
  82. BIN gps/all_gps.mat
  83. +0 −6 gps/apply_james.m
  84. +0 −16 gps/apply_sketches.m
  85. +0 −36 gps/cart2gps.m
  86. +0 −47 gps/generate_gps_maps.m
  87. +0 −16 gps/get_gps_ball.m
  88. +0 −15 gps/get_james_160matches.m
  89. +0 −20 gps/get_james_bg.m
  90. +0 −73 gps/get_james_nearest.m
  91. +0 −51 gps/gps2cart.m
  92. +0 −176 gps/initialize_james_exemplars.m
  93. +0 −12 gps/james_name.m
  94. +0 −25 gps/kml_add_overlay.m
  95. +0 −13 gps/kml_add_placemark.m
  96. +0 −81 gps/kml_density_image.m
  97. +0 −87 gps/kml_distribution.m
  98. +0 −11 gps/load_james_image.m
  99. +0 −7 gps/loop_james_show.m
  100. +0 −64 gps/precompute_james_nearest.m
  101. +0 −92 gps/show_compare_james.m
  102. +0 −39 gps/show_james.m
  103. +0 −20 iccv11/apply_boost_M.m
  104. +0 −22 iccv11/calibrate_and_estimate_M.m
  105. +0 −29 iccv11/extract_bbs_from_rs.m
  106. +0 −41 iccv11/find_set_membership.m
  107. +0 −16 iccv11/initialize_mining_queue.m
  108. +0 −66 iccv11/old.initialize_model_dt.m
  109. +0 −25 iccv11/old.load_default_class.m
  110. +0 −8 iccv11/old.save_default_class.m
  111. +0 −46 iccv11/old.slow_nms.m
  112. BIN images/demo_framing.pdf
  113. BIN images/sa_teaser.png
  114. 0 {iccv11 → internal}/README.md
  115. 0 {iccv11 → internal}/calibrate_boxes.m
  116. +10 −5 iccv11/adjust_boxes.m → internal/esvm_adjust_boxes.m
  117. +28 −0 internal/esvm_apply_M.m
  118. +60 −50 iccv11/estimate_M.m → internal/esvm_estimate_M.m
  119. +5 −0 internal/esvm_get_default_params_scene.m
  120. +108 −0 internal/esvm_get_pascal_stream.m
  121. +4 −3 initialize_fixedframe_model.m → internal/esvm_initialize_fixedframe_exemplar.m
  122. +10 −10 initialize_goalsize_model.m → internal/esvm_initialize_goalsize_exemplar.m
  123. +21 −0 internal/esvm_initialize_mining_queue.m
  124. +38 −35 load_all_models.m → internal/esvm_load_models.m
  125. +37 −17 load_result_grid.m → internal/esvm_load_result_grid.m
  126. +14 −14 mine_negatives.m → internal/esvm_mine_negatives.m
  127. +20 −46 mine_train_iteration.m → internal/esvm_mine_train_iteration.m
  128. +24 −26 iccv11/perform_calibration.m → internal/esvm_perform_platt_calibration.m
  129. +14 −24 do_dfun.m → internal/esvm_update_dfun.m
  130. +166 −0 internal/esvm_update_svm.m
  131. +41 −34 {iccv11 → internal}/evaluate_pascal_voc_grid.m
  132. 0 { → internal}/exemplar_initialize_dt.m
  133. +7 −3 {iccv11 → internal}/exemplar_inpaint.m
  134. 0 {iccv11 → internal/experiments}/complem_experiment.m
  135. 0 {iccv11 → internal/experiments}/cook_abhinav_transfers.m
  136. 0 {iccv11 → internal/experiments}/count_correct_person.m
  137. +1 −0 {iccv11 → internal/experiments}/do_pedro_transfer.m
  138. +1 −1 {iccv11 → internal/experiments}/evaluate_pascal_voc_grid_complements.m
  139. 0 {iccv11 → internal/experiments}/generate_figures.m
  140. 0 {iccv11 → internal/experiments}/generate_supp_mat_tex.m
  141. 0 {iccv11 → internal/experiments}/geom_eval.m
  142. 0 {iccv11 → internal/experiments}/label_buses.m
  143. 0 {iccv11 → internal/experiments}/show_context_hits.m
  144. 0 {iccv11 → internal/experiments}/stats_on_person.m
  145. 0 {iccv11 → internal/experiments}/supplemental.tex
  146. 0 {iccv11 → internal/experiments}/test_all_complements.m
  147. 0 {iccv11 → internal}/extract_svs.m
  148. 0 {iccv11 → internal}/faces2colors.m
  149. 0 {iccv11 → internal}/flip_box.m
  150. 0 {iccv11 → internal}/flip_faces.m
  151. 0 {iccv11 → internal}/flip_image.m
  152. 0 { → internal}/generate_docs.m
  153. 0 {iccv11 → internal}/get_box_features.m
  154. +16 −8 {iccv11 → internal}/get_exemplar_icon.m
  155. 0 {iccv11 → internal}/get_file_id.m
  156. +1 −1 {iccv11 → internal}/get_geometry_icon.m
  157. 0 { → internal}/get_pascal_anno_function.m
  158. +17 −5 {iccv11 → internal}/get_pascal_set.m
  159. 0 {iccv11 → internal}/get_seg_icon.m
  160. 0 {iccv11 → internal}/get_sv_row.m
  161. +25 −10 {iccv11 → internal}/get_sv_stack.m
  162. +44 −0 internal/get_voc_dataset.m
  163. 0 {iccv11 → internal}/learn_sigmoid.m
  164. 0 {iccv11 → internal}/mywarppos.m
  165. 0 {iccv11 → internal}/nms.m
  166. 0 {iccv11 → internal}/nms_within_exemplars.m
  167. +1 −1 {iccv11 → internal}/perform_hitdump.m
  168. 0 {iccv11 → internal}/populate_wiggles.m
  169. 0 {iccv11 → internal}/prune_grid.m
  170. 0 {iccv11 → internal}/replica_hits.m
  171. 0 {iccv11 → internal}/showM.m
  172. 0 {iccv11 → internal}/show_hits_figure.m
  173. +18 −0 internal/show_top_match_image.m
  174. 0 {iccv11 → internal}/show_top_transfers.m
  175. +1 −1 iccv11/strip_model.m → internal/strip_models.m
  176. +5 −4 {iccv11 → internal}/transfer_friends.m
  177. BIN libsvm-mat-3.0-1/libsvmread.mexa64
  178. BIN libsvm-mat-3.0-1/libsvmread.mexmaci
  179. BIN libsvm-mat-3.0-1/libsvmread.mexmaci64
  180. BIN libsvm-mat-3.0-1/libsvmread.mexw32
  181. BIN libsvm-mat-3.0-1/libsvmwrite.mexa64
  182. BIN libsvm-mat-3.0-1/libsvmwrite.mexmaci
  183. BIN libsvm-mat-3.0-1/libsvmwrite.mexmaci64
  184. BIN libsvm-mat-3.0-1/libsvmwrite.mexw32
  185. +0 −10 libsvm-mat-3.0-1/make.m
  186. BIN libsvm-mat-3.0-1/svmpredict.mexa64
  187. BIN libsvm-mat-3.0-1/svmpredict.mexmaci
  188. BIN libsvm-mat-3.0-1/svmpredict.mexw32
  189. 0 {libsvm-mat-3.0-1 → libsvm}/.gitignore
  190. 0 {libsvm-mat-3.0-1 → libsvm}/COPYRIGHT
  191. 0 {libsvm-mat-3.0-1 → libsvm}/Makefile
  192. 0 {libsvm-mat-3.0-1 → libsvm}/README
  193. +10 −0 libsvm/libsvm_compile.m
  194. 0 libsvm-mat-3.0-1/svmpredict.c → libsvm/libsvmpredict.c
  195. 0 {libsvm-mat-3.0-1 → libsvm}/libsvmread.c
  196. 0 {libsvm-mat-3.0-1 → libsvm}/libsvmtrain.c
  197. BIN {libsvm-mat-3.0-1 → libsvm}/libsvmtrain.mexa64
  198. BIN {libsvm-mat-3.0-1 → libsvm}/libsvmtrain.mexmaci
  199. BIN {libsvm-mat-3.0-1 → libsvm}/libsvmtrain.mexmaci64
  200. 0 {libsvm-mat-3.0-1 → libsvm}/libsvmwrite.c
  201. 0 {libsvm-mat-3.0-1 → libsvm}/svm.cpp
  202. 0 {libsvm-mat-3.0-1 → libsvm}/svm.h
  203. 0 {libsvm-mat-3.0-1 → libsvm}/svm_model_matlab.c
  204. 0 {libsvm-mat-3.0-1 → libsvm}/svm_model_matlab.h
  205. +0 −126 load_voc_models.m
  206. +0 −9 memex_browser/README.md
  207. +0 −144 memex_browser/memex.js
  208. +0 −7 memex_browser/raphael.js
  209. +0 −224 memex_browser/show_exemplar_browser.m
  210. +0 −121 memex_browser/show_memex_browser2.m
  211. 0 debug_demos/demo_framing.m → old.demo_framing.m
  212. +161 −0 old.demo_interact.m
  213. +0 −185 train_all_exemplars.m
  214. +15 −0 util/check_for_lock_files.m
  215. 0 util/{ → display}/jettify.m
  216. 0 util/{ → display}/plot_bbox.m
  217. 0 util/{ → display}/plot_faces.m
  218. 0 { → util/display}/show_exemplar_frames.m
  219. 0 util/{ → display}/tight_subplot.m
  220. +2 −0 util/util_compile.m
  221. +0 −33 video/ScreenCapture.m
  222. +44 −231 video/capture_screen.m
  223. +2 −2 video/get_movie_bg.m
  224. +19 −15 video/get_screenshot_bg.m
  225. +8 −11 video/initialize_screenshot.m
  226. +6 −6 video/select_bbox_from_image.m
  227. +0 −388 video/video_exemplar_initialize.m
View
@@ -1,4 +1,3 @@
-*.mat
*~
.DS_Store
data/
View
20 COPYING
@@ -0,0 +1,20 @@
+Copyright (C) 2011-12 by Tomasz Malisiewicz
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
View
209 README.md
@@ -1,47 +1,202 @@
-### ICCV2011 Abstract
+Welcome to the Exemplar-SVM library, a large-scale object recognition
+library developed at Carnegie Mellon University while obtaining my PhD
+in Robotics.
+ -- Tomasz Malisiewicz
-This paper proposes a conceptually simple but surprisingly powerful method which combines the effectiveness of a discriminative object detector with the explicit correspondence offered by a nearest-neighbor approach. The method is based on training a separate linear SVM classifier for every exemplar in the training set. Each of these Exemplar-SVMs is thus defined by a single positive instance and millions of negatives. While each detector is quite specific to its exemplar, we empirically observe that an ensemble of such Exemplar-SVMs offers surprisingly good generalization. Our performance on the PASCAL VOC detection task is on par with the much more complex latent part-based model of Felzenszwalb et al., at only a modest computational cost increase. But the central benefit of our approach is that it creates an explicit association between each detection and a single training exemplar. Because most detections show good alignment to their associated exemplar, it is possible to transfer any available exemplar meta-data (segmentation, geometric structure, 3D model, etc.) directly onto the detections, which can then be used as part of overall scene understanding.
+The code is written in Matlab and is the basis of the following two
+projects:
+
+## [Tomasz Malisiewicz](http://www.cs.cmu.edu/~tmalisie/), [Abhinav Gupta](http://www.cs.cmu.edu/~abhinavg), [Alexei A. Efros](http://www.cs.cmu.edu/~efros). **Ensemble of Exemplar-SVMs for Object Detection and Beyond.** In ICCV, 2011. [PDF](http://www.cs.cmu.edu/~tmalisie/projects/iccv11/exemplarsvm-iccv11.pdf) | [Project Page](http://www.cs.cmu.edu/~tmalisie/projects/iccv11/)
![](https://github.com/quantombone/exemplarsvm/raw/master/images/exemplar_classifiers-small_n.png)
-----
-**Author + Executive Exemplar-SVM Developer**: [Tomasz Malisiewicz](http://www.cs.cmu.edu/~tmalisie/)
-<br/>
-**Exemplar-SVM Fellow Developer**: [Abhinav Shrivastava](http://www.abhinav-shrivastava.info/)
-<br/>
-**Exemplar-SVM Visionary**: [Abhinav Gupta](http://www.cs.cmu.edu/~abhinavg)
-<br/>
-**Exemplar-SVM Visionary**: [Alexei A. Efros](http://www.cs.cmu.edu/~efros)
-###Please cite the following paper if you use this library:
+Abstract
+
+This paper proposes a conceptually simple but surprisingly powerful method which combines the effectiveness of a discriminative object detector with the explicit correspondence offered by a nearest-neighbor approach. The method is based on training a separate linear SVM classifier for every exemplar in the training set. Each of these Exemplar-SVMs is thus defined by a single positive instance and millions of negatives. While each detector is quite specific to its exemplar, we empirically observe that an ensemble of such Exemplar-SVMs offers surprisingly good generalization. Our performance on the PASCAL VOC detection task is on par with the much more complex latent part-based model of Felzenszwalb et al., at only a modest computational cost increase. But the central benefit of our approach is that it creates an explicit association between each detection and a single training exemplar. Because most detections show good alignment to their associated exemplar, it is possible to transfer any available exemplar meta-data (segmentation, geometric structure, 3D model, etc.) directly onto the detections, which can then be used as part of overall scene understanding.
+
+---
-Tomasz Malisiewicz, Abhinav Gupta, Alexei A. Efros. **Ensemble of Exemplar-SVMs for Object Detection and Beyond.** In ICCV, 2011.
-[PDF](http://www.cs.cmu.edu/~tmalisie/projects/iccv11/exemplarsvm-iccv11.pdf)
-[Project Page](http://www.cs.cmu.edu/~tmalisie/projects/iccv11/)
+## [Abhinav Shrivastava](http://www.abhinav-shrivastava.info/), [Tomasz Malisiewicz](http://www.cs.cmu.edu/~tmalisie/), [Abhinav Gupta](http://www.cs.cmu.edu/~abhinavg), [Alexei A. Efros](http://www.cs.cmu.edu/~efros). **Data-driven Visual Similarity for Cross-domain Image Matching.** In SIGGRAPH ASIA, December 2011. [PDF](http://www.cs.cmu.edu/~tmalisie/projects/sa11/shrivastava-sa11.pdf) | [Project Page](http://graphics.cs.cmu.edu/projects/crossDomainMatching/)
-###See my PhD thesis for more information and ICCV follow-up experiments:
+![](https://github.com/quantombone/exemplarsvm/raw/v1/images/sa_teaser.png)
-Tomasz Malisiewicz. **Exemplar-based Representations for Object Detection, Association and Beyond.** PhD Dissertation, tech. report CMU-RI-TR-11-32. August, 2011. [PDF](http://www.cs.cmu.edu/~tmalisie/thesis/malisiewicz_thesis.pdf)
+Abstract
-###See our SIGGRAPH ASIA 2011 paper for image on image matching:
-Abhinav Shrivastava, Tomasz Malisiewicz, Abhinav Gupta, Alexei A. Efros. **Data-driven Visual Similarity for Cross-domain Image Matching.** In SIGGRAPH ASIA, December 2011. [PDF](http://www.cs.cmu.edu/~tmalisie/projects/sa11/shrivastava-sa11.pdf) [Project Page](http://graphics.cs.cmu.edu/projects/crossDomainMatching/)
+The goal of this work is to find visually similar images even if they
+appear quite different at the raw pixel level. This task is
+particularly important for matching images across visual domains, such
+as photos taken over different seasons or lighting conditions,
+paintings, hand-drawn sketches, etc. We propose a surprisingly simple
+method that estimates the relative importance of different features in
+a query image based on the notion of "data-driven uniqueness". We
+employ standard tools from discriminative object detection in a novel
+way, yielding a generic approach that does not depend on a particular
+image representation or a specific visual domain. Our approach shows
+good performance on a number of difficult cross-domain visual tasks
+e.g., matching paintings or sketches to real photographs. The method
+also allows us to demonstrate novel applications such as Internet
+re-photography, and painting2gps.
-----
-This object recognition library uses some great software:
+---
-* [libsvm-3.0-1](http://www.csie.ntu.edu.tw/~cjlin/libsvm/)
+More details and experimental evaluation can be found in my PhD thesis, available to download as a PDF.
-* fast blas convolution code (from [voc-release-4.0](http://www.cs.brown.edu/~pff/latent/)),
+[Tomasz Malisiewicz](http://www.cs.cmu.edu/~tmalisie/). **Exemplar-based Representations for Object Detection, Association and Beyond.** PhD Dissertation, tech. report CMU-RI-TR-11-32. August, 2011. [PDF](http://www.cs.cmu.edu/~tmalisie/thesis/malisiewicz_thesis.pdf)
-* 31-D HOG feature code (from [voc-release-3.1](http://www.cs.brown.edu/~pff/latent/)),
+----
+
+This object recognition library uses some great open-source software:
+
+* Linear SVM training: [libsvm-3.0-1](http://www.csie.ntu.edu.tw/~cjlin/libsvm/)
+
+* Fast blas convolution code (from [voc-release-4.0](http://www.cs.brown.edu/~pff/latent/)),
+
+* HOG feature code (31-D) (from [voc-release-3.1](http://www.cs.brown.edu/~pff/latent/)),
* [VOC development/evaluation code](http://pascallin.ecs.soton.ac.uk/challenges/VOC/) imported from the PASCAL VOC website
+
+----
+
+# MATLAB Quick Start Guide
+
+To get started, you need to install MATLAB and download the code from Github. This code has been tested on Mac OS X and Linux. Pre-compiled Mex files for Mac OS X and Linux are included.
+
+## Download Exemplar-SVM Library source code (MATLAB and C++)
+``` sh
+$ cd ~/projects/
+$ git clone git@github.com:quantombone/exemplarsvm.git
+$ cd ~/projects/exemplarsvm
+```
+
+## Make sure Exemplar-SVM library is compiled and working (You shouldn't have to do this on Mac OS X or Linux)
+``` sh
+$ matlab
+$ >> cd features/
+$ >> features_compile;
+$ >> cd ../util/
+$ >> util_compile;
+$ >> cd ../libsvm/
+$ >> libsvm_compile;
+```
+
+## Download and load pre-trained VOC2007 model(s)
+``` sh
+$ matlab
+$ >> addpath(genpath(pwd))
+$ >> esvm_download_models('bus');
+$ >> load voc2007-bus.mat #vars "models", "M" and URL-based "test_set" are loaded
+```
+
+You can alternatively download the pre-trained models individually from [http://people.csail.mit.edu/tomasz/exemplarsvm/models/](http://people.csail.mit.edu/tomasz/exemplarsvm/models/) or a tar file of all models [voc2007-models.tar](http://people.csail.mit.edu/tomasz/exemplarsvm/models/voc2007-models.tar) (NOTE: 449MB)
+
+``` sh
+$ cd ~/projects/exemplarsvm/
+$ wget http://people.csail.mit.edu/~tomasz/exemplarsvm/voc2007-models.tar
+$ tar -xf voc2007-models.tar
+```
+
+then in MATLAB, you can load models by their name:
+
+``` sh
+$ matlab
+$ >> load voc2007_bus.mat
+```
+
+
+## Apply models to a set of images (test_set)
+
+``` sh
+$ >> esvm_demo_apply_exemplars(test_set, models, M);
+```
+
+Or load your own image
+
+``` sh
+$ matlab
+$ >> I = imread('image1.png'); #load your own image
+$ >> esvm_demo_apply_exemplars(I, models, M);
+```
+
+Or load your own set of images
+
+``` sh
+$ matlab
+$ >> I1 = imread('image1.png'); #your own image
+$ >> ...
+$ >> IN = imread('imageN.png'); #your own image
+$ >> Iarray = {I1, ..., IN};
+$ >> esvm_demo_apply_exemplars(Iarray, models, M)
+```
+
+Or process a directory of images
+
+``` sh
+$ matlab
+$ >> Idirectory = '~/images/';
+$ >> esvm_demo_apply_exemplars(Idirectory, models, M)
+```
+
---
-## Quickstart Guide
- * For training your own exemplars, see the notes in [exemplarsvm/demos/README.md](https://github.com/quantombone/exemplarsvm/blob/master/demos/README.md) and the main training script in [exemplarsvm/demos/voc_demo_esvm.m](https://github.com/quantombone/exemplarsvm/blob/master/demos/voc_demo_esvm.m)
-
- * For evaluating the PASCAL VOC 2007 pre-trained exemplars, see the notes in [exemplarsvm/demos/README.md](https://github.com/quantombone/exemplarsvm/blob/master/demos/README.md) and the main evaluation function in [exemplarsvm/demos/voc_demo_apply.m](https://github.com/quantombone/exemplarsvm/blob/master/demos/voc_demo_apply.m)
+# Train and Test an Ensemble of Exemplar-SVMs from scratch
+
+
+The training scripts are designed to work with the PASCAL VOC 2007
+dataset, so we need to download that first.
+
+## Install PASCAL VOC 2007 trainval/test sets
+``` sh
+$ mkdir /nfs/baikal/tmalisie/pascal #Make a directory for the PASCAL VOC data
+$ cd /nfs/baikal/tmalisie/pascal
+$ wget http://pascallin.ecs.soton.ac.uk/challenges/VOC/voc2007/VOCtrainval_06-Nov-2007.tar
+$ wget http://pascallin.ecs.soton.ac.uk/challenges/VOC/voc2007/VOCtest_06-Nov-2007.tar
+$ tar xf VOCtest_06-Nov-2007.tar
+$ tar xf VOCtrainval_06-Nov-2007.tar
+```
+
+You can also get the VOC 2007 dataset tar files manually, [VOCtrainval_06-Nov-2007.tar](http://pascallin.ecs.soton.ac.uk/challenges/VOC/voc2007/VOCtrainval_06-Nov-2007.tar) and [VOCtest_06-Nov-2007.tar](http://pascallin.ecs.soton.ac.uk/challenges/VOC/voc2007/VOCtest_06-Nov-2007.tar)
+
+
+## Edit directories in esvm_script_train_voc_class.m
+``` sh
+data_directory = '/your/directory/to/pascal/VOCdevkit/';
+results_directory = '/your/results/directory/';
+```
+
+## Training and Evaluating an Ensemble of "bus" Exemplar-SVMs
+``` sh
+$ matlab
+$ >> addpath(genpath(pwd))
+$ >> [models,M] = esvm_script_train_voc_class('bus');
+# All output (models, M-matrix, AP curve) has been written to results_directory
+```
+
+# Extra: How to run the Exemplar-SVM framework on a cluster?
+
+This library was meant to run on a cluster with a shared NFS/AFS file
+structure where all nodes can read/write data from a common data
+source/target. The PASCAL VOC dataset must be installed on such a
+shared resource and the results directory as well. The idea is that
+results are written as .mat files and intermediate work is protected
+via lock files. Lock files are temporary files (they are directories
+actually) which are deleted once something has finished processing. This
+means that the entire voc training script can be replicated across a
+cluster: you can run the script 200 times and the training will
+happen in parallel.
+
+To run ExemplarSVM on a cluster, first make sure you have a cluster,
+use an ssh-based launcher such as my
+[warp_scripts](https://github.com/quantombone/warp_scripts) github
+project. I have used warp_starter.sh at CMU (using WARP cluster)
+and sc.sh at MIT (using the continents).
+
+### Here is the command I often use at MIT to start Exemplar-SVM runs, where machine_list.sh contains computer names
+``` sh
+$ cd ~/warp_scripts/
+$ ./sc.sh "cd ~/projects/exemplarsvm; addpath(genpath(pwd)); esvm_script_train_voc_class('train');"
+```
+
---
**Copyright (C) 2011 by Tomasz Malisiewicz**
View
@@ -73,7 +73,7 @@
[ap, apold, rec, prec, fp, tp, is_correct] = get_aps(VOCopts,draw,cls,gtids,gt,npos,ids,confidence,BB);
finaltime = toc(sss);
-fprintf(1,'final time is %.3f\n',finaltime);
+fprintf(1,'Time for computing AP: %.3fsec\n',finaltime);
function [ap,apold,rec,prec,fp,tp,is_correct] = get_aps(VOCopts,draw,cls,gtids,gt,npos,ids,confidence,BB);
@@ -113,6 +113,7 @@
% assign detection to ground truth object if any
bb=BB(:,d);
ovmax=-inf;
+
for j=1:size(gt(i).BB,2)
bbgt=gt(i).BB(:,j);
bi=[max(bb(1),bbgt(1)) ; max(bb(2),bbgt(2)) ; min(bb(3),bbgt(3)) ; min(bb(4),bbgt(4))];
Oops, something went wrong.

0 comments on commit 46069c2

Please sign in to comment.