
added sample citation file

commit 9c697eaaa9e6495e4829066d8d796fb7d7b1e7b1 1 parent a2daf50
@cathywu authored
63 egpaper_final.aux
@@ -1,46 +1,27 @@
\relax
+\citation{Authors11}
\@writefile{toc}{\contentsline {section}{\numberline {1}\hskip -1em.\nobreakspace {}Introduction}{1}}
\@writefile{toc}{\contentsline {section}{\numberline {2}\hskip -1em.\nobreakspace {}Previous Work}{1}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {2.1}\hskip -1em.\nobreakspace {}Language}{1}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {2.2}\hskip -1em.\nobreakspace {}Dual submission}{1}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {2.3}\hskip -1em.\nobreakspace {}Paper length}{1}}
-\citation{Authors11}
-\citation{Authors11b}
-\@writefile{toc}{\contentsline {subsection}{\numberline {2.4}\hskip -1em.\nobreakspace {}The ruler}{2}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {2.5}\hskip -1em.\nobreakspace {}Mathematics}{2}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {2.6}\hskip -1em.\nobreakspace {}Blind review}{2}}
-\citation{Alpher02}
-\citation{Alpher03}
-\citation{Alpher04}
-\citation{Alpher03}
-\citation{Alpher03}
-\citation{Alpher03}
-\citation{Alpher02}
-\citation{Authors06}
-\citation{Alpher02}
-\citation{Alpher03}
-\citation{Authors06}
-\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces Example of caption. It is set in Roman so that mathematics (always set in Roman: $B \qopname \relax o{sin}A = A \qopname \relax o{sin}B$) may be included without an ugly clash.}}{3}}
-\newlabel{fig:long}{{1}{3}}
-\newlabel{fig:onecol}{{1}{3}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {2.7}\hskip -1em.\nobreakspace {}Miscellaneous}{3}}
-\@writefile{toc}{\contentsline {section}{\numberline {3}\hskip -1em.\nobreakspace {}Formatting your paper}{3}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {3.1}\hskip -1em.\nobreakspace {}Margins and page numbering}{3}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {3.2}\hskip -1em.\nobreakspace {}Type-style and fonts}{3}}
-\citation{Authors06}
-\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces Example of a short caption, which should be centered.}}{4}}
-\newlabel{fig:short}{{2}{4}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {3.3}\hskip -1em.\nobreakspace {}Footnotes}{4}}
-\@writefile{lot}{\contentsline {table}{\numberline {1}{\ignorespaces Results. Ours is better.}}{4}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {3.4}\hskip -1em.\nobreakspace {}Appendix A}{4}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {3.5}\hskip -1em.\nobreakspace {}Appendix B}{4}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {3.6}\hskip -1em.\nobreakspace {}References}{4}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {3.7}\hskip -1em.\nobreakspace {}Illustrations, graphs, and photographs}{4}}
-\@writefile{toc}{\contentsline {subsection}{\numberline {3.8}\hskip -1em.\nobreakspace {}Color}{4}}
+\@writefile{toc}{\contentsline {section}{\numberline {3}\hskip -1em.\nobreakspace {}The User Review Domain}{1}}
+\@writefile{toc}{\contentsline {section}{\numberline {4}\hskip -1em.\nobreakspace {}Machine Learning Methods}{2}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {4.1}\hskip -1em.\nobreakspace {}The Naive Bayes Classifier}{2}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {4.2}\hskip -1em.\nobreakspace {}The Maximum Entropy Classifier}{2}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {4.3}\hskip -1em.\nobreakspace {}The Support Vector Machine Classifier}{2}}
+\@writefile{toc}{\contentsline {section}{\numberline {5}\hskip -1em.\nobreakspace {}Experimental Setup}{3}}
+\@writefile{toc}{\contentsline {section}{\numberline {6}\hskip -1em.\nobreakspace {}Results}{3}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {6.1}\hskip -1em.\nobreakspace {}Feature Counting Method}{3}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {6.2}\hskip -1em.\nobreakspace {}Conditional Independence Assumption}{3}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {6.3}\hskip -1em.\nobreakspace {}Number of Features}{3}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {6.4}\hskip -1em.\nobreakspace {}Negation Tagging}{4}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {6.5}\hskip -1em.\nobreakspace {}Position Tagging}{4}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {6.6}\hskip -1em.\nobreakspace {}Part of Speech Tagging}{4}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {6.7}\hskip -1em.\nobreakspace {}Adjectives}{4}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {6.8}\hskip -1em.\nobreakspace {}Verbs}{4}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {6.9}\hskip -1em.\nobreakspace {}Majority Voting}{4}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {6.10}\hskip -1em.\nobreakspace {}Neighboring Domain Data}{4}}
\bibstyle{ieee}
\bibdata{egbib}
-\@writefile{lof}{\contentsline {figure}{\numberline {3}{\ignorespaces 3-fold cross validation results on movie dataset. Values represent positive, negative, or overall accuracy.}}{5}}
-\@writefile{toc}{\contentsline {section}{\numberline {4}\hskip -1em.\nobreakspace {}Final copy}{5}}
-\@writefile{lof}{\contentsline {figure}{\numberline {4}{\ignorespaces Test results on Yelp dataset with Naive Bayes classifier. Values represent percent of reviews classified as positive for a given star rating.}}{6}}
-\@writefile{lof}{\contentsline {figure}{\numberline {5}{\ignorespaces Test results on Yelp dataset with Maximum Entropy classifier. Values represent percent of reviews classified as positive for a given star rating.}}{6}}
-\@writefile{lof}{\contentsline {figure}{\numberline {6}{\ignorespaces Test results on Yelp dataset with SVM classifier. Values represent percent of reviews classified as positive for a given star rating.}}{6}}
+\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces 3-fold cross validation results on movie dataset. Values represent positive, negative, or overall accuracy.}}{6}}
+\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces Test results on Yelp dataset with Naive Bayes classifier. Values represent percent of reviews classified as positive for a given star rating.}}{7}}
+\@writefile{lof}{\contentsline {figure}{\numberline {3}{\ignorespaces Test results on Yelp dataset with Maximum Entropy classifier. Values represent percent of reviews classified as positive for a given star rating.}}{7}}
+\@writefile{lof}{\contentsline {figure}{\numberline {4}{\ignorespaces Test results on Yelp dataset with SVM classifier. Values represent percent of reviews classified as positive for a given star rating.}}{7}}
8 egpaper_final.blg
@@ -1,20 +1,20 @@
This is BibTeX, Version 0.99c (TeX Live 2009/Debian)
The top-level auxiliary file: egpaper_final.aux
I couldn't open style file ieee.bst
----line 40 of file egpaper_final.aux
+---line 22 of file egpaper_final.aux
: \bibstyle{ieee
: }
I'm skipping whatever remains of this command
I couldn't open database file egbib.bib
----line 41 of file egpaper_final.aux
+---line 23 of file egpaper_final.aux
: \bibdata{egbib
: }
I'm skipping whatever remains of this command
I found no database files---while reading file egpaper_final.aux
I found no style file---while reading file egpaper_final.aux
-You've used 6 entries,
+You've used 1 entry,
0 wiz_defined-function locations,
- 97 strings with 613 characters,
+ 87 strings with 527 characters,
and the built_in function-call counts, 0 in all, are:
= -- 0
> -- 0
160 egpaper_final.log
@@ -1,4 +1,4 @@
-This is pdfTeX, Version 3.1415926-1.40.10 (TeX Live 2009/Debian) (format=pdflatex 2011.11.2) 5 FEB 2012 21:15
+This is pdfTeX, Version 3.1415926-1.40.10 (TeX Live 2009/Debian) (format=pdflatex 2011.11.2) 5 FEB 2012 21:26
entering extended mode
%&-line parsing enabled.
**egpaper_final.tex
@@ -208,6 +208,15 @@ File: ot1pcr.fd 2001/06/04 font definitions for OT1/pcr.
)
LaTeX Font Info: Font shape `OT1/ptm/bx/n' in size <12> not available
(Font) Font shape `OT1/ptm/b/n' tried instead on input line 49.
+LaTeX Font Info: Font shape `OT1/ptm/bx/n' in size <10> not available
+(Font) Font shape `OT1/ptm/b/n' tried instead on input line 50.
+
+
+LaTeX Warning: Citation `Authors11' on page 1 undefined on input line 50.
+
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
Missing character: There is no � in font ptmr7t!
Missing character: There is no � in font ptmr7t!
Missing character: There is no � in font ptmr7t!
@@ -217,93 +226,134 @@ Missing character: There is no
Missing character: There is no � in font ptmr7t!
Missing character: There is no � in font ptmr7t!
Missing character: There is no � in font ptmr7t!
-LaTeX Font Info: Font shape `OT1/ptm/bx/n' in size <10> not available
-(Font) Font shape `OT1/ptm/b/n' tried instead on input line 75.
- [1{/var/lib/texmf/fonts/map/pdftex/updmap/pdftex.map}
-
-
-]
-
-LaTeX Warning: Citation `Authors11' on page 2 undefined on input line 183.
-
-
-LaTeX Warning: Citation `Authors11b' on page 2 undefined on input line 195.
-
-[2]
-
-LaTeX Warning: Citation `Alpher02' on page 3 undefined on input line 267.
-
-
-LaTeX Warning: Citation `Alpher03' on page 3 undefined on input line 268.
-
-LaTeX Warning: Citation `Alpher04' on page 3 undefined on input line 268.
+Underfull \hbox (badness 3088) in paragraph at lines 65--66
+[]\OT1/ptm/m/n/10 For our ex-per-i-ments, we worked with movie re-
+ []
-LaTeX Warning: Citation `Alpher03' on page 3 undefined on input line 270.
+Underfull \hbox (badness 1803) in paragraph at lines 65--66
+\OT1/ptm/m/n/10 views. Our data source was Pangs re-leased dataset
+ []
-LaTeX Warning: Citation `Alpher03' on page 3 undefined on input line 271.
+Underfull \hbox (badness 10000) in paragraph at lines 65--66
+\OT1/ptm/m/n/10 (http://www.cs.cornell.edu/people/pabo/movie-review-
+ []
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
-LaTeX Warning: Citation `Alpher03' on page 3 undefined on input line 276.
+Underfull \vbox (badness 10000) has occurred while \output is active []
+ [1{/var/lib/texmf/fonts/map/pdftex/updmap/pdftex.map}
-LaTeX Warning: Citation `Alpher02' on page 3 undefined on input line 276.
+]
+Overfull \hbox (2.72952pt too wide) detected at line 84
+\OT1/cmr/m/n/10 = \OML/cmm/m/it/10 P\OT1/cmr/m/n/10 (\OML/cmm/m/it/10 C\OT1/cmr
+/m/n/10 )\OML/cmm/m/it/10 P\OT1/cmr/m/n/10 (\OML/cmm/m/it/10 F[]\OMS/cmsy/m/n/1
+0 j\OML/cmm/m/it/10 C\OT1/cmr/m/n/10 )\OML/cmm/m/it/10 P\OT1/cmr/m/n/10 (\OML/c
+mm/m/it/10 F[]\OMS/cmsy/m/n/10 j\OML/cmm/m/it/10 C; F[]\OT1/cmr/m/n/10 )\OML/cm
+m/m/it/10 P\OT1/cmr/m/n/10 (\OML/cmm/m/it/10 F[]; F[]; [] ; F[]\OMS/cmsy/m/n/10
+ j\OML/cmm/m/it/10 C; F[]; F[]\OT1/cmr/m/n/10 )[]
+ []
-LaTeX Warning: Citation `Authors06' on page 3 undefined on input line 276.
+Underfull \hbox (badness 10000) in paragraph at lines 105--106
+[]\OT1/ptm/m/n/10 [http://nlp.stanford.edu/IR-
+ []
-LaTeX Warning: Citation `Alpher02' on page 3 undefined on input line 277.
+Underfull \hbox (badness 10000) in paragraph at lines 105--106
+\OT1/ptm/m/n/10 book/html/htmledition/naive-bayes-text-classification-
+ []
-LaTeX Warning: Citation `Alpher03' on page 3 undefined on input line 277.
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+[2]
+Underfull \hbox (badness 1688) in paragraph at lines 145--146
+\OT1/ptm/m/n/10 tions well, in part be-cause the pos-i-tive and neg-a-tive
+ []
-LaTeX Warning: Citation `Authors06' on page 3 undefined on input line 277.
+Overfull \hbox (58.7314pt too wide) in paragraph at lines 145--146
+\OT1/ptm/m/n/10 [http://www.cs.unb.ca/profs/hzhang/publications/FLAIRS04ZhangH.
+pdf].
+ []
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
[3]
-Overfull \hbox (4.21208pt too wide) in paragraph at lines 369--414
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+Missing character: There is no � in font ptmr7t!
+ [4]
+Overfull \hbox (4.21208pt too wide) in paragraph at lines 199--244
[][]
[]
-
-LaTeX Warning: Citation `Authors06' on page 4 undefined on input line 498.
-
-
-Underfull \vbox (badness 5652) has occurred while \output is active []
-
- [4]
(./egpaper_final.bbl) [5
-] [6] (./egpaper_final.aux)
+] [6] [7] (./egpaper_final.aux)
LaTeX Warning: There were undefined references.
)
Here is how much of TeX's memory you used:
- 2173 strings out of 495061
- 26125 string characters out of 1182621
- 139749 words of memory out of 3000000
- 5307 multiletter control sequences out of 15000+50000
- 33404 words of font info for 80 fonts, out of 3000000 for 9000
+ 2139 strings out of 495061
+ 25791 string characters out of 1182621
+ 134725 words of memory out of 3000000
+ 5284 multiletter control sequences out of 15000+50000
+ 24844 words of font info for 61 fonts, out of 3000000 for 9000
28 hyphenation exceptions out of 8191
32i,9n,24p,794b,314s stack positions out of 5000i,500n,10000p,200000b,50000s
{/usr/share/texmf-texlive/fonts/enc/dvips/base/8r.enc}</usr/share/texmf-texli
-ve/fonts/type1/public/amsfonts/cm/cmmi10.pfb></usr/share/texmf-texlive/fonts/ty
-pe1/public/amsfonts/cm/cmmi7.pfb></usr/share/texmf-texlive/fonts/type1/public/a
-msfonts/cm/cmmi9.pfb></usr/share/texmf-texlive/fonts/type1/public/amsfonts/cm/c
-mr10.pfb></usr/share/texmf-texlive/fonts/type1/public/amsfonts/cm/cmr7.pfb></us
-r/share/texmf-texlive/fonts/type1/public/amsfonts/cm/cmr9.pfb></usr/share/texmf
--texlive/fonts/type1/public/amsfonts/cm/cmsy10.pfb></usr/share/texmf-texlive/fo
-nts/type1/public/amsfonts/cm/cmti10.pfb></usr/share/texmf-texlive/fonts/type1/u
-rw/courier/ucrr8a.pfb></usr/share/texmf-texlive/fonts/type1/urw/times/utmb8a.pf
-b></usr/share/texmf-texlive/fonts/type1/urw/times/utmr8a.pfb></usr/share/texmf-
-texlive/fonts/type1/urw/times/utmri8a.pfb>
-Output written on egpaper_final.pdf (6 pages, 162579 bytes).
+ve/fonts/type1/public/amsfonts/cm/cmex10.pfb></usr/share/texmf-texlive/fonts/ty
+pe1/public/amsfonts/cm/cmmi10.pfb></usr/share/texmf-texlive/fonts/type1/public/
+amsfonts/cm/cmmi7.pfb></usr/share/texmf-texlive/fonts/type1/public/amsfonts/cm/
+cmr10.pfb></usr/share/texmf-texlive/fonts/type1/public/amsfonts/cm/cmr7.pfb></u
+sr/share/texmf-texlive/fonts/type1/public/amsfonts/cm/cmsy10.pfb></usr/share/te
+xmf-texlive/fonts/type1/public/amsfonts/cm/cmsy7.pfb></usr/share/texmf-texlive/
+fonts/type1/urw/courier/ucrr8a.pfb></usr/share/texmf-texlive/fonts/type1/urw/ti
+mes/utmb8a.pfb></usr/share/texmf-texlive/fonts/type1/urw/times/utmr8a.pfb></usr
+/share/texmf-texlive/fonts/type1/urw/times/utmri8a.pfb>
+Output written on egpaper_final.pdf (7 pages, 160339 bytes).
PDF statistics:
- 70 PDF objects out of 1000 (max. 8388607)
+ 71 PDF objects out of 1000 (max. 8388607)
0 named destinations out of 1000 (max. 500000)
1 words of extra memory for PDF output out of 10000 (max. 10000000)
BIN  egpaper_final.pdf
Binary file not shown
2  egpaper_final.tex
@@ -47,7 +47,7 @@
%%%%%%%%% ABSTRACT
\begin{abstract}
-We implement a series of classifiers (Naive Bayes, Maximum Entropy, and SVM) to distinguish positive and negative sentiment in critic and user reviews. We apply various processing methods, including negation tagging, part-of-speech tagging, and position tagging to achieve maximum accuracy. We test our classifiers on an external dataset to see how well they generalize. Finally, we use a majority-voting technique to combine classifiers and achieve accuracy of close to 90\% in 3-fold cross-validation.
+We implement a series of classifiers (Naive Bayes, Maximum Entropy, and SVM) to distinguish positive and negative sentiment in critic and user reviews. We apply various processing methods, including negation tagging, part-of-speech tagging, and position tagging to achieve maximum accuracy. We test our classifiers on an external dataset to see how well they generalize. Finally, we use a majority-voting technique to combine classifiers and achieve accuracy of close to 90\% in 3-fold cross-validation\cite{Authors11}.
\end{abstract}
%%%%%%%%% BODY TEXT
472 egpaper_final.tex~
@@ -61,310 +61,140 @@ We set out to replicate Pang’s work from 2002 on using classical knowledge-fre
In addition to replicating Pang’s work as closely as we could, we extended the work by exploring an additional dataset, additional preprocessing techniques, and combining classifiers. We tested how well classifiers trained on Pang’s dataset extended to reviews in another domain. Although Pang limited many of his tests to use only the 16165 most common n-grams, advanced processors have lifted this computational constraint, and so we additionally tested on all n-grams. We used a newer parameter estimation algorithm called Limited-Memory Variable Metric (L-BFGS) for maximum entropy classification. Pang used the Improved Iterative Scaling method. We also implemented and tested the effect of term frequency-inverse document frequency (TF-IDF) on classification results.
-%-------------------------------------------------------------------------
-\subsection{Language}
-
-All manuscripts must be in English.
-
-\subsection{Dual submission}
-
-By submitting a manuscript to CVPR, the authors assert that it has not been
-previously published in substantially similar form. Furthermore, no paper
-which contains significant overlap with the contributions of this paper
-either has been or will be submitted during the CVPR 2011 review period to
-{\bf either a journal} or any conference (including CVPR 2011) or any
-workshop (including CVPR2011 workshops)
- {\bf Note that
- this is consistent with CVPR2010 but a strengthening from some previous CVPR
- policy}. Papers violating this condition will be rejected and a list of violating authors may be included in the proceedings.
-
-If there are papers that may appear to the reviewers
-to violate this condition, then it is your responsibility to: (1)~cite
-these papers (preserving anonymity as described in Section 1.6 below),
-(2)~argue in the body of your paper why your CVPR paper is non-trivially
-different from these concurrent submissions, and (3)~include anonymized
-versions of those papers in the supplemental material.
-
-\subsection{Paper length}
-CVPR papers may be between 6 pages and 8 pages, with a \$100 per page added
-fee. Overlength papers will simply not be reviewed. This includes papers
-where the margins and formatting are deemed to have been significantly
-altered from those laid down by this style guide. Note that this
-\LaTeX\ guide already sets figure captions and references in a smaller font.
-The reason such papers will not be reviewed is that there is no provision for
-supervised revisions of manuscripts. The reviewing process cannot determine
-the suitability of the paper for presentation in eight pages if it is
-reviewed in eleven. If you submit 8 for review expect to pay the added page
-charges for them.
+\section{The User Review Domain}
+For our experiments, we worked with movie reviews. Our data source was Pang’s released dataset (http://www.cs.cornell.edu/people/pabo/movie-review-data/) from their 2004 publication. The dataset contains 1000 positive reviews and 1000 negative reviews, each labeled with their true sentiment. The original data source was the Internet Movie Database (IMDb).
-%-------------------------------------------------------------------------
-\subsection{The ruler}
-The \LaTeX\ style defines a printed ruler which should be present in the
-version submitted for review. The ruler is provided in order that
-reviewers may comment on particular lines in the paper without
-circumlocution. If you are preparing a document using a non-\LaTeX\
-document preparation system, please arrange for an equivalent ruler to
-appear on the final output pages. The presence or absence of the ruler
-should not change the appearance of any other content on the page. The
-camera ready copy should not contain a ruler. (\LaTeX\ users may uncomment
-the \verb'\cvprfinalcopy' command in the document preamble.) Reviewers:
-note that the ruler measurements do not align well with lines in the paper
---- this turns out to be very difficult to do well when the paper contains
-many figures and equations, and, when done, looks ugly. Just use fractional
-references (e.g.\ this line is $095.5$), although in most cases one would
-expect that the approximate location will be adequate.
-
-\subsection{Mathematics}
-
-Please number all of your sections and displayed equations. It is
-important for readers to be able to refer to any particular equation. Just
-because you didn't refer to it in the text doesn't mean some future reader
-might not need to refer to it. It is cumbersome to have to use
-circumlocutions like ``the equation second from the top of page 3 column
-1''. (Note that the ruler will not be present in the final copy, so is not
-an alternative to equation numbers). All authors will benefit from reading
-Mermin's description of how to write mathematics.%: \url{http://www.cvpr.org/doc/mermin.pdf}.
-
-
-\subsection{Blind review}
-
-Many authors misunderstand the concept of anonymizing for blind
-review. Blind review does not mean that one must remove
-citations to one's own work---in fact it is often impossible to
-review a paper unless the previous citations are known and
-available.
-
-Blind review means that you do not use the words ``my'' or ``our''
-when citing previous work. That is all. (But see below for
-techreports)
-
-Saying ``this builds on the work of Lucy Smith [1]'' does not say
-that you are Lucy Smith, it says that you are building on her
-work. If you are Smith and Jones, do not say ``as we show in
-[7]'', say ``as Smith and Jones show in [7]'' and at the end of the
-paper, include reference 7 as you would any other cited work.
-
-An example of a bad paper just asking to be rejected:
-\begin{quote}
-\begin{center}
- An analysis of the frobnicatable foo filter.
-\end{center}
-
- In this paper we present a performance analysis of our
- previous paper [1], and show it to be inferior to all
- previously known methods. Why the previous paper was
- accepted without this analysis is beyond me.
-
- [1] Removed for blind review
-\end{quote}
-
-
-An example of an acceptable paper:
-
-\begin{quote}
-\begin{center}
- An analysis of the frobnicatable foo filter.
-\end{center}
-
- In this paper we present a performance analysis of the
- paper of Smith \etal [1], and show it to be inferior to
- all previously known methods. Why the previous paper
- was accepted without this analysis is beyond me.
-
- [1] Smith, L and Jones, C. ``The frobnicatable foo
- filter, a fundamental contribution to human knowledge''.
- Nature 381(12), 1-213.
-\end{quote}
-
-If you are making a submission to another conference at the same time,
-which covers similar or overlapping material, you may need to refer to that
-submission in order to explain the differences, just as you would if you
-had previously published related work. In such cases, include the
-anonymized parallel submission~\cite{Authors11} as additional material and
-cite it as
-\begin{quote}
-[1] Authors. ``The frobnicatable foo filter'', F\&G 2011 Submission ID 324,
-Supplied as additional material {\tt fg324.pdf}.
-\end{quote}
-
-Finally, you may feel you need to tell the reader that more details can be
-found elsewhere, and refer them to a technical report. For conference
-submissions, the paper must stand on its own, and not {\em require} the
-reviewer to go to a techreport for further details. Thus, you may say in
-the body of the paper ``further details may be found
-in~\cite{Authors11b}''. Then submit the techreport as additional material.
-Again, you may not assume the reviewers will read this material.
-
-Sometimes your paper is about a problem which you tested using a tool which
-is widely known to be restricted to a single institution. For example,
-let's say it's 1969, you have solved a key problem on the Apollo lander,
-and you believe that the CVPR11 audience would like to hear about your
-solution. The work is a development of your celebrated 1968 paper entitled
-``Zero-g frobnication: How being the only people in the world with access to
-the Apollo lander source code makes us a wow at parties'', by Zeus \etal.
-
-You can handle this paper like any other. Don't write ``We show how to
-improve our previous work [Anonymous, 1968]. This time we tested the
-algorithm on a lunar lander [name of lander removed for blind review]''.
-That would be silly, and would immediately identify the authors. Instead
-write the following:
-\begin{quotation}
-\noindent
- We describe a system for zero-g frobnication. This
- system is new because it handles the following cases:
- A, B. Previous systems [Zeus et al. 1968] didn't
- handle case B properly. Ours handles it by including
- a foo term in the bar integral.
-
- ...
-
- The proposed system was integrated with the Apollo
- lunar lander, and went all the way to the moon, don't
- you know. It displayed the following behaviours
- which show how well we solved cases A and B: ...
-\end{quotation}
-As you can see, the above text follows standard scientific convention,
-reads better than the first version, and does not explicitly name you as
-the authors. A reviewer might think it likely that the new paper was
-written by Zeus \etal, but cannot make any decision based on that guess.
-He or she would have to be sure that no other authors could have been
-contracted to solve problem B.
-
-FAQ: Are acknowledgements OK? No. Leave them for the final copy.
-
-
-\begin{figure}[t]
-\begin{center}
-\fbox{\rule{0pt}{2in} \rule{0.9\linewidth}{0pt}}
- %\includegraphics[width=0.8\linewidth]{egfigure.eps}
-\end{center}
- \caption{Example of caption. It is set in Roman so that mathematics
- (always set in Roman: $B \sin A = A \sin B$) may be included without an
- ugly clash.}
-\label{fig:long}
-\label{fig:onecol}
-\end{figure}
-
-\subsection{Miscellaneous}
-
-\noindent
-Compare the following:\\
-\begin{tabular}{ll}
- \verb'$conf_a$' & $conf_a$ \\
- \verb'$\mathit{conf}_a$' & $\mathit{conf}_a$
-\end{tabular}\\
-See The \TeX book, p165.
-
-The space after \eg, meaning ``for example'', should not be a
-sentence-ending space. So \eg is correct, {\em e.g.} is not. The provided
-\verb'\eg' macro takes care of this.
-
-When citing a multi-author paper, you may save space by using ``et alia'',
-shortened to ``\etal'' (not ``{\em et.\ al.}'' as ``{\em et}'' is a complete word.)
-However, use it only when there are three or more authors. Thus, the
-following is correct: ``
- Frobnication has been trendy lately.
- It was introduced by Alpher~\cite{Alpher02}, and subsequently developed by
- Alpher and Fotheringham-Smythe~\cite{Alpher03}, and Alpher \etal~\cite{Alpher04}.''
-
-This is incorrect: ``... subsequently developed by Alpher \etal~\cite{Alpher03} ...''
-because reference~\cite{Alpher03} has just two authors. If you use the
-\verb'\etal' macro provided, then you need not worry about double periods
-when used at the end of a sentence as in Alpher \etal.
-
-For this citation style, keep multiple citations in numerical (not
-chronological) order, so prefer \cite{Alpher03,Alpher02,Authors06} to
-\cite{Alpher02,Alpher03,Authors06}.
+Pang applied the bag-of-words method to positive and negative sentiment classification, but the same method can be extended to various other domains, including topic classification. We additionally chose to work with a set of 5000 Yelp reviews, 1000 for each of their five “star” ratings. Yelp is a popular online urban city guide that houses reviews of restaurants, shopping areas, and businesses. Although a movie review and a Yelp review will differ in specialized vocabulary, audience, tone, etc., the ways that people convey sentiment (e.g. I loved it!) may not differ entirely. We wished to explore how training classifiers in one domain might generalize to neighboring domains.
+The domain of reviews is experimentally convenient because they are widely available online and because reviewers often summarize their overall sentiment with a machine-extractable rating indicator; hence, there was no need for hand-labeling of data.
-\begin{figure*}
-\begin{center}
-\fbox{\rule{0pt}{2in} \rule{.9\linewidth}{0pt}}
-\end{center}
- \caption{Example of a short caption, which should be centered.}
-\label{fig:short}
-\end{figure*}
-%------------------------------------------------------------------------
-\section{Formatting your paper}
+\section{Machine Learning Methods}
+\subsection{The Naive Bayes Classifier}
+The Naive Bayes classifier is an extremely simple classifier that relies on Bayesian probability and the assumption that feature probabilities are independent of one another.
+Bayes' Rule gives:
+$$
+P(C | F_1, F_2, \ldots, F_n)
+= \frac{P(C)P(F_1, F_2, \ldots, F_n | C)}{P(F_1, F_2, \ldots, F_n)}
+$$
-All text must be in a two-column format. The total allowable width of the
-text area is $6\frac78$ inches (17.5 cm) wide by $8\frac78$ inches (22.54
-cm) high. Columns are to be $3\frac14$ inches (8.25 cm) wide, with a
-$\frac{5}{16}$ inch (0.8 cm) space between them. The main title (on the
-first page) should begin 1.0 inch (2.54 cm) from the top edge of the
-page. The second and following pages should begin 1.0 inch (2.54 cm) from
-the top edge. On all pages, the bottom margin should be 1-1/8 inches (2.86
-cm) from the bottom edge of the page for $8.5 \times 11$-inch paper; for A4
-paper, approximately 1-5/8 inches (4.13 cm) from the bottom edge of the
-page.
+Simplifying the numerator gives:
+$$P(C)P(F_1, F_2, \ldots, F_n | C)$$
+$$= P(C)P(F_1 | C)P(F_2, F_3, \ldots, F_n | C, F_1)$$
+$$= P(C)P(F_1 | C)P(F_2 | C, F_1)P(F_3, F_4, \ldots, F_n | C, F_1, F_2)$$
+$$\ldots$$
-%-------------------------------------------------------------------------
-\subsection{Margins and page numbering}
+Then, assuming the feature probabilities are independent gives
+$$P(F_i | F_j\ldots F_k) = P(F_i)$$
+so
+$$P(F_i | C, F_j\ldots F_k) = P(F_i | C)$$
+$$P(C | F_1\ldots F_n) \propto P(C) \prod_{i=1}^n P(F_i | C)$$
-All printed material, including text, illustrations, and charts, must be
-kept within a print area 6-7/8 inches (17.5 cm) wide by 8-7/8 inches
-(22.54 cm) high.
+$P(F_i | C)$ is estimated through plus-one smoothing on a labeled training set, that is:
+$$P(F_i | C) = \frac{1 + count(C, F_i)}{\sum_j count(C_j, F_i)}$$
+where $count(C_j, F_i)$ is the number of times that $F_i$ appears over all training documents in class $C_j$.
+The class a feature vector belongs to is given by
+$$C^* = \operatorname*{arg\,max}_C P(C | F_1\ldots F_n)$$
+Taking the logarithm of the maximand gives
+$$C^* = \operatorname*{arg\,max}_C \Big(\lg P(C) + \sum_i F_i \big[\lg (1 + count(C, F_i)) - \lg \sum_j count(C_j, F_i)\big]\Big)$$
-%-------------------------------------------------------------------------
-\subsection{Type-style and fonts}
+While the Naive Bayes classifier seems very simple, it is observed to have high predictive power; in our tests, it performed competitively with the more sophisticated classifiers we used. The Bayes classifier can also be implemented very efficiently. Its independence assumption means that it does not fall prey to the curse of dimensionality, and its running time is linear in the size of the input.
-Wherever Times is specified, Times Roman may also be used. If neither is
-available on your word processor, please use the font closest in
-appearance to Times to which you have access.
+[http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html]
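A minimal sketch of the classifier described above, scoring in log space with the plus-one-smoothed estimate as written (counts of a feature are normalized across classes, with the number of classes added to the denominator so the smoothed fractions stay normalized; the paper's formula leaves that adjustment implicit). The toy corpus is invented; the paper's real features were n-grams from the movie-review dataset.

import math
from collections import Counter, defaultdict

def train(docs):
    """docs: list of (tokens, label) pairs. Returns document-count priors
    and per-class feature counts."""
    priors, counts = Counter(), defaultdict(Counter)
    for tokens, label in docs:
        priors[label] += 1
        counts[label].update(tokens)
    return priors, counts

def classify(tokens, priors, counts):
    total, classes = sum(priors.values()), list(priors)
    def log_score(c):
        s = math.log(priors[c] / total)          # lg P(C)
        for t in tokens:                         # repeated tokens weight by F_i
            num = counts[c][t] + 1               # 1 + count(C, F_i)
            den = sum(counts[k][t] for k in classes) + len(classes)
            s += math.log(num / den)             # smoothed lg P(F_i | C)
        return s
    return max(classes, key=log_score)

priors, counts = train([("a great wonderful movie".split(), "pos"),
                        ("a dull boring movie".split(), "neg")])
print(classify("wonderful and great".split(), priors, counts))  # -> pos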
-MAIN TITLE. Center the title 1-3/8 inches (3.49 cm) from the top edge of
-the first page. The title should be in Times 14-point, boldface type.
-Capitalize the first letter of nouns, pronouns, verbs, adjectives, and
-adverbs; do not capitalize articles, coordinate conjunctions, or
-prepositions (unless the title begins with such a word). Leave two blank
-lines after the title.
+\subsection{The Maximum Entropy Classifier}
-AUTHOR NAME(s) and AFFILIATION(s) are to be centered beneath the title
-and printed in Times 12-point, non-boldface type. This information is to
-be followed by two blank lines.
+Maximum Entropy is a general-purpose machine learning technique that provides the least biased estimate possible based on the given information. In other words, “it is maximally noncommittal with regards to missing information” [src]. Importantly, it makes no conditional independence assumption between features, as the Naive Bayes classifier does.
-The ABSTRACT and MAIN TEXT are to be in a two-column format.
+Maximum entropy’s estimate of $P(c|d)$ takes the following exponential form:
+$$P(c|d) = \frac{1}{Z(d)} \exp(\sum_i(\lambda_{i,c} F_{i,c}(d,c)))$$
-MAIN TEXT. Type main text in 10-point Times, single-spaced. Do NOT use
-double-spacing. All paragraphs should be indented 1 pica (approx. 1/6
-inch or 0.422 cm). Make sure your text is fully justified---that is,
-flush left and flush right. Please do not place any additional blank
-lines between paragraphs.
+The $\lambda_{i,c}$’s are feature-weight parameters, where a large $\lambda_{i,c}$ means that $f_i$ is considered a strong indicator for class $c$. We used 30 iterations of Limited-Memory Variable Metric (L-BFGS) parameter estimation. Pang used the Improved Iterative Scaling (IIS) method, but L-BFGS, a method that was invented after their paper was published, was found to outperform both IIS and generalized iterative scaling (GIS), yet another parameter estimation method.
-Figure and table captions should be 9-point Roman type as in
-Figures~\ref{fig:onecol} and~\ref{fig:short}. Short captions should be centred.
+We used Zhang Le’s (2004) Maximum Entropy Modeling Toolkit package for Python and C++ [link] [src], with no special configuration.
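A toy evaluation of the exponential form above, with hand-picked weights for illustration; in the actual experiments the lambdas come out of the L-BFGS parameter estimation.

import math

def maxent_prob(active_features, weights, classes):
    """active_features: feature names firing in document d.
    weights: dict mapping (feature, class) -> lambda. Returns P(c | d)."""
    scores = {c: math.exp(sum(weights.get((f, c), 0.0) for f in active_features))
              for c in classes}
    z = sum(scores.values())                 # the normalizer Z(d)
    return {c: s / z for c, s in scores.items()}

weights = {("great", "pos"): 1.2, ("great", "neg"): -0.4}
print(maxent_prob({"great", "movie"}, weights, ["pos", "neg"]))
# -> roughly {'pos': 0.83, 'neg': 0.17}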
-\noindent Callouts should be 9-point Helvetica, non-boldface type.
-Initially capitalize only the first word of section titles and first-,
-second-, and third-order headings.
+\subsection{The Support Vector Machine Classifier}
-FIRST-ORDER HEADINGS. (For example, {\large \bf 1. Introduction})
-should be Times 12-point boldface, initially capitalized, flush left,
-with one blank line before.
+Support Vector Machines (SVMs) operate by separating points in a d-dimensional space using a (d-1)-dimensional hyperplane, unlike Max-Ent and Naive Bayes classifiers, which use probabilistic measures to classify points. Given a set of training data, the SVM classifier finds a hyperplane with the largest possible margin; that is, it tries to find the hyperplane such that each training point is correctly classified and the hyperplane is as far as possible from the points closest to it. In practice, it is usually not possible to find a hyperplane that separates the classes perfectly, so points are permitted to be inside the margin or on the wrong side of the hyperplane. Any point on or inside the margin is referred to as a support vector, and the hyperplane, given by
+$$f(\vec{B}, B_0) = \{\vec{x} | \vec{x}^T \cdot \vec{B} + B_0 = 0\}$$
+is selected through a constrained quadratic optimization to minimize
+$$ \frac{1}{2} |\vec{B}|^2 + C\sum_i \zeta_i$$
+given
+$$\forall i, \zeta_i \ge 0$$
+$$\forall i, y_i (\vec{x}_i^T \cdot \vec{B} + B_0) \ge 1 - \zeta_i $$
-SECOND-ORDER HEADINGS. (For example, { \bf 1.1. Database elements})
-should be Times 11-point boldface, initially capitalized, flush left,
-with one blank line before, and one after. If you require a third-order
-heading (we discourage it), use 10-point Times, boldface, initially
-capitalized, flush left, preceded by one blank line, followed by a period
-and your text on the same line.
+For this paper, we used the PyML implementation of SVMs, which uses the liblinear optimizer to actually find the separating hyperplane. Of the three classifiers, this was the slowest to train, as it suffers from the curse of dimensionality.
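A stand-in sketch of this setup using scikit-learn's LinearSVC instead of PyML (both drive the liblinear optimizer for the soft-margin objective above); the two-document corpus is invented.

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.svm import LinearSVC

docs = ["a great wonderful movie", "a dull boring movie"]
labels = [1, 0]                               # 1 = positive, 0 = negative

vectorizer = CountVectorizer(binary=True)     # presence-of-feature values
X = vectorizer.fit_transform(docs)

clf = LinearSVC(C=1.0)                        # C weights the slack penalty above
clf.fit(X, labels)
print(clf.predict(vectorizer.transform(["a wonderful movie"])))  # -> [1]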
-%-------------------------------------------------------------------------
-\subsection{Footnotes}
+\section{Experimental Setup}
+We used documents from the movie review dataset and ran 3-fold cross validation in a number of test configurations. We ignored case and treated punctuation marks as separate lexical items.
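A sketch of that preprocessing step; the exact regular expression is an assumption, since the paper does not spell out its tokenizer.

import re

def tokenize(text):
    # Lowercase, then treat each punctuation mark as its own lexical item.
    return re.findall(r"[a-z0-9']+|[^\sa-z0-9']", text.lower())

print(tokenize("That movie was not very good!"))
# -> ['that', 'movie', 'was', 'not', 'very', 'good', '!']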
-Please use footnotes\footnote {This is what a footnote looks like. It
-often distracts the reader from the main flow of the argument.} sparingly.
-Indeed, try to avoid footnotes altogether and include necessary peripheral
-observations in
-the text (within parentheses, if you prefer, as in this sentence). If you
-wish to use a footnote, place it at the bottom of the column on the page on
-which it is referenced. Use Times 8-point type, single-spaced.
+Our testbed supported testing various parameters: frequency vs. presence of features vs. term frequency-inverse document frequency, unigrams vs. bigrams vs. both, number of features, and type of feature tagging. The types of feature tagging were negation, part of speech (POS), and position. We additionally supported training and testing on only adjectives or verbs, as well as training on the full movie dataset and testing on the Yelp dataset.
-%-------------------------------------------------------------------------
-\subsection{Appendix A}
+\section{Results}
+\subsection{Feature Counting Method}
+There are several ways to construct a probability model for a set of document n-grams. The most obvious is to use feature frequency. The value of a feature in a given document is simply the number of times it appears in that document.
+
+As a whole (across all other parameters), training on presence rather than frequency performed on average 5.5\% better for Naive Bayes (73.1\% accuracy with frequency versus 78.5\% with presence), with improvements ranging from 0\% to 10\% and no particular outliers among test configurations. There was no significant difference for SVMs, and applying TF-IDF provided no improvement over frequency for either. Neither comparison applies to Maximum Entropy.
+
+Interestingly, for Naive Bayes, the positive and negative tests performed very differently between presence and frequency tests. Excluding verb tests, which did not exhibit this disparity, positive tests averaged 6.5\% worse (up to 12\% worse in the worst case) while negative tests averaged 18.9\% better (up to 30\% better), for an average aggregate difference of 25.4\% between positive and negative results. By comparison, SVMs exhibited an average aggregate difference of 0.7\%. These results provide evidence that training on presence rather than frequency yields models with less bias.
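To make the distinction concrete, here is how one document's feature values differ under the two counting methods (a toy example; the paper's feature set was the corpus's most common n-grams).

from collections import Counter

tokens = ["good", "good", "plot", "bad"]
frequency = dict(Counter(tokens))        # {'good': 2, 'plot': 1, 'bad': 1}
presence = {t: 1 for t in tokens}        # {'good': 1, 'plot': 1, 'bad': 1}
print(frequency, presence)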
+
+\subsection{Conditional Independence Assumption}
+
+The Bayes classifier depends on a conditional independence assumption, meaning that the model it predicts assumes that the probability of a given word is independent of the other words. Clearly, this assumption does not hold. Nevertheless, the Bayes classifier functions well, in part because the positive and negative correlations between features tend to cancel each other out [http://www.cs.unb.ca/profs/hzhang/publications/FLAIRS04ZhangH.pdf].
+
+We found a huge difference between results of Naive Bayes and Maximum Entropy for positive testing accuracy and negative testing accuracy. Maximum Entropy, which makes no unfounded assumptions about the data, gave very similar results for positive and negative tests, with a 0.2\% difference on average. On the other hand, positive and negative results from Naive Bayes, which assumes conditional independence, vary by 27.5\% on average, with the worst cases, averaging a 40\% difference, occurring on test configurations using frequency. These disparities suggest that the movie dataset does not satisfy the conditional independence assumption.
+
+\subsection{Number of Features}
+
+One key decision in a bag-of-words feature set is which words to include. Using more words provides more information, but harms the performance of the classifiers, and words that appear only infrequently in the training data may not present accurate information due to the law of small numbers. We examine results with the entire training data, as well as with only the top 16165 and 2633 unigrams and bigrams.
+
+Using the most frequent unigrams is an extremely simple method of feature selection, and in this case, not a particularly robust one, since feature selection should look for words that identify a given class. Choosing frequent words does not discriminate between the two classes and will select common words like ``the'' and ``it'', which likely are weak sentiment indicators. On the other hand, uncommon words that appear in only a handful or fewer of reviews will not contribute much to sentiment indication. Pang’s motivation for limiting the number of features was to improve testing performance; our classifiers and processors were fast enough that this was not a concern.
+
+On average, limiting the number of features from 16165 to 2633, as in the original Pang paper, caused accuracy to drop by 5.2\%, 4.0\%, and 2.8\% for Naive Bayes, Maximum Entropy, and SVM, respectively. These results indicate that valuable sentiment information was lost in the restriction of features.
+
+However, when restricting from all features down to 16165, the results were a wash. Naive Bayes did slightly worse, Maximum Entropy remained unchanged, and SVMs did slightly better. These results suggest that uncommon features do not carry much sentiment information. Additionally, this validated Pang’s use of a limited feature set, which did not significantly impact the results while satisfying their performance constraints.
+
+\subsection{Negation Tagging}
+
+In an effort to preserve the potential value of negation information while using dead-simple features, we tagged words between those expressing negation and the next punctuation mark with a postfix ``\_NOT.'' This distinguishes sentences like ``That movie was very good'' and ``That movie was not very good.'' Diverging from Pang, we also added negation tags to bigrams.
+
+Negation tagging did not appear to have a significant effect on the data. For all the classifiers, the results from negation tagged data were almost the same as the results from the raw data. Nevertheless, we used negation tagging for the remainder of the tests, as it did not seem to hurt performance or accuracy.
+
+The ineffectiveness of negation tagging probably comes from a few sources. First, it increases the number of uncommon features, which, as discussed previously, harms effectiveness and cancels out the increase in semantic awareness. Second, the presence of a “not” does not always indicate negation. Rather, it is often used idiomatically, as in the example fragment ``with his distinctive, more often than not ingenious dialogue''. Finally, the method of tagging all words up to the next punctuation mark is suspect: only a few words after the ``not'' are actually negated, and those words often occur after a comma or other punctuation mark.
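A sketch of the tagging scheme described above; the trigger-word list is an assumption, since the paper does not enumerate its negation words.

NEGATIONS = {"not", "no", "never", "n't", "cannot"}
PUNCTUATION = set(".,;:!?")

def negation_tag(tokens):
    tagged, negating = [], False
    for tok in tokens:
        if tok in PUNCTUATION:
            negating = False               # negation scope ends at punctuation
            tagged.append(tok)
        elif tok in NEGATIONS:
            negating = True                # start tagging the following words
            tagged.append(tok)
        else:
            tagged.append(tok + "_NOT" if negating else tok)
    return tagged

print(negation_tag("that movie was not very good .".split()))
# -> ['that', 'movie', 'was', 'not', 'very_NOT', 'good_NOT', '.']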
+
+\subsection{Position Tagging}
+Reviews are split into a beginning, middle, and end, so to see if one section carries more sentiment than another, we split the reviews into a first quarter, a middle half, and a last quarter and tagged the words in each section.
+
+Position tagging was not helpful. For bigrams, it harmed performance by around 5\% in most cases, and for unigrams, it made no difference. If reviews do not actually follow the specified model, or if the model has no bearing on where the relevant data is, position tagging is harmful because it increases the dimensionality of the input without increasing the information content.
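A sketch of the quarter/half/quarter split; the tag names are invented for illustration.

def position_tag(tokens):
    n = len(tokens)
    first, last = n // 4, 3 * n // 4       # boundaries of the middle half
    return [tok + ("_FIRST" if i < first else "_LAST" if i >= last else "_MID")
            for i, tok in enumerate(tokens)]

print(position_tag("one two three four five six seven eight".split()))
# -> ['one_FIRST', 'two_FIRST', 'three_MID', ..., 'seven_LAST', 'eight_LAST']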
+
+\subsection{Part of Speech Tagging}
+We appended POS tags to every word using Oliver Mason’s Qtag program [src]. This serves as a rough way to disambiguate words that may hold different meanings in different contexts. For example, it would distinguish the different uses of “love” in ``I love this movie'' versus ``This is a love story.'' However, it turns out that word-sense disambiguation is a much more complicated problem, as POS tags do nothing to distinguish the meanings of ``cold'' in ``I was a bit cold during the movie'' and ``The cold murderer chilled my heart.''
+
+Part of speech tagging was not very helpful for unigram results; in fact, the NB classifier did slightly worse with parts of speech tagged when using unigrams. However, when using bigrams, the MaxEnt and SVM classifiers did significantly better, achieving 3-4\% better accuracy with part of speech tagging when measuring frequency and presence information.
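As a stand-in for Qtag, NLTK's tagger produces the same style of word\_TAG feature (assuming NLTK and its tokenizer/tagger models are installed).

import nltk  # requires the punkt and averaged_perceptron_tagger models

tokens = nltk.word_tokenize("I love this movie")
features = ["_".join(pair) for pair in nltk.pos_tag(tokens)]
print(features)  # e.g. ['I_PRP', 'love_VBP', 'this_DT', 'movie_NN']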
+\subsection{Adjectives}
+Intuitively, adjectives like ``beautiful'', ``wonderful'', and ``great'' hold valuable sentiment information, so we trained our classifiers after filtering reviews down to only their adjectives. On average, adjective tests performed about 6\% worse than their unfiltered negation-tagged counterparts, with no notable difference among the three classifiers. These results suggest that the limited information conveyed in adjectives is not representative of the full review itself.
+
+
+\subsection{Verbs}
+In the motivating example for POS tagging, it was the verb use of ``love'' (``I love this movie''), rather than the adjective use of the word, that conveyed sentiment information. Interestingly, Pang did not include results for training only on verbs. Even more interestingly, despite the motivating example, verbs underperformed all other tests, while still being consistently better than random. The tests ranged from 60\% to 67\% accuracy, sometimes doing worse than the 64\%-accurate human-based classifier from Pang 2002. We suspect this is in part due to the sparsity of features when using only verbs, as there were on average 37.2 verbs and 55.7 adjectives per review.
+
+\subsection{Majority Voting}
+Given a large ensemble of classifiers, an easy way to combine them is with a simple majority voting scheme. This tends to eliminate weaknesses that exist in only one classifier, but can also eliminate strengths that exist in only one classifier.
+Majority voting in some cases provided a small but significant improvement over the classifiers alone; combining Bayes, MaxEnt, and SVM classifiers over the same data provided a three to four percent boost over the best of the individual classifiers alone.
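The combination rule itself is a one-liner (a sketch; an even-sized ensemble would additionally need a tie-break rule).

from collections import Counter

def majority_vote(predictions):
    # predictions: one label per classifier, e.g. from NB, MaxEnt, and SVM.
    return Counter(predictions).most_common(1)[0][0]

print(majority_vote(["pos", "neg", "pos"]))  # -> pos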
+
+\subsection{Neighboring Domain Data}
+Mostly out of curiosity, we wanted to see how our test configurations would perform when trained on the movie dataset and tested on the Yelp dataset, an external out-of-domain dataset. We preprocessed the Yelp dataset to match the format of the movie dataset and selected 1000 reviews for each of the 1-5 star ratings. For evaluation purposes, we scored the accuracy on only 1-star and 5-star reviews, giving our testbed only high-confidence negative and positive reviews, respectively. The score was simply the average of the two accuracies.
+
+Across the board, the classifiers had a harder time with the Yelp dataset as compared to the movie dataset, performing between 56.0\% and 75.2\%. The respective lowest- and highest-performing configurations scored 67.0\% and 84.0\% on the movie dataset.
+
+We had expected even worse results, given the differences in vocabulary, subject matter, tone, etc., but all configurations performed better than random. We also saw strong positive trends across all test configurations: reviews with more stars were classified as positive more often.
+
+
+
+%-------------------------------------------------------------------------
\begin{figure*}
\begin{tabular}{{|l}*{11}{|c}|r|}
\hline
@@ -375,11 +205,11 @@ Domain & Features & \# of features & Frequency & + & - & $\pm$& + &
No-negation & Unigrams & 16165 & Frequency & 0.94 & 0.62 & 0.78 & - & - & - & 0.82 & 0.82 & 0.82 \\
No-negation & Unigrams & 16165 & Presence & 0.87 & 0.72 & 0.82 & 0.85 & 0.87 & 0.86 & 0.85 & 0.84 & 0.84 \\
No-negation & Bigrams & 16165 & Frequency & 0.92 & 0.64 & 0.78 & - & - & - & 0.77 & 0.81 & 0.79 \\
-No-negation & Bigrams & 16165 & Presence & 0.89 & 0.73 & 0.81 & 0.79 & 0.82 & 0.81 & 0.8 & 0.81 & 0.8 \\
+No-negation & Bigrams & 16165 & Presence & 0.89 & 0.73 & 0.81 & 0.79 & 0.82 & 0.81 & 0.8 & 0.81 & 0.80 \\
adjectives & Unigrams & 16165 & Frequency & 0.95 & 0.52 & 0.73 & - & - & - & 0.75 & 0.77 & 0.76 \\
default & Bigrams & 2633 & Frequency & 0.91 & 0.46 & 0.69 & - & - & - & 0.74 & 0.75 & 0.75 \\
default & Bigrams & 16165 & Frequency & 0.92 & 0.64 & 0.78 & - & - & - & 0.78 & 0.79 & 0.78 \\
-default & Unigrams & 2633 & Frequency & 0.96 & 0.5 & 0.74 & - & - & - & 0.81 & 0.79 & 0.8 \\
+default & Unigrams & 2633 & Frequency & 0.96 & 0.5 & 0.74 & - & - & - & 0.81 & 0.79 & 0.80 \\
default & Unigrams & 16165 & Frequency & 0.93 & 0.59 & 0.76 & - & - & - & 0.82 & 0.81 & 0.82 \\
default & Unigrams & maximum & Frequency & 0.95 & 0.49 & 0.72 & - & - & - & 0.82 & 0.81 & 0.82 \\
partofspeech & Bigrams & 16165 & Frequency & 0.96 & 0.47 & 0.71 & - & - & - & 0.82 & 0.82 & 0.82 \\
@@ -396,26 +226,25 @@ default & Unigrams & maximum & Presence & 0.91 & 0.7 & 0.81 & 0.84 & 0.86 & 0.85
partofspeech & Bigrams & 16165 & Presence & 0.89 & 0.73 & 0.81 & 0.84 & 0.84 & 0.84 & 0.79 & 0.82 & 0.8 \\
partofspeech & Unigrams & 16165 & Presence & 0.86 & 0.76 & 0.81 & 0.85 & 0.85 & 0.85 & 0.84 & 0.83 & 0.84 \\
position & Bigrams & 16165 & Presence & 0.87 & 0.66 & 0.76 & 0.82 & 0.83 & 0.82 & 0.73 & 0.76 & 0.74 \\
-position & Unigrams & 16165 & Presence & 0.86 & 0.78 & 0.82 & 0.84 & 0.85 & 0.85 & 0.8 & 0.8 & 0.8 \\
-verbs & Unigrams & maximum & Presence & 0.8 & 0.54 & 0.67 & 0.65 & 0.65 & 0.65 & 0.64 & 0.63 & 0.635 \\
-adjectives & Unigrams & 16165 & TF-IDF & 0.82 & 0.6 & 0.71 & - & - & - & 0.79 & 0.76 & 0.77 \\
+position & Unigrams & 16165 & Presence & 0.86 & 0.78 & 0.82 & 0.84 & 0.85 & 0.85 & 0.80 & 0.80 & 0.80 \\
+verbs & Unigrams & maximum & Presence & 0.80 & 0.54 & 0.67 & 0.65 & 0.65 & 0.65 & 0.64 & 0.63 & 0.635 \\
+adjectives & Unigrams & 16165 & TF-IDF & 0.82 & 0.60 & 0.71 & - & - & - & 0.79 & 0.76 & 0.77 \\
default & Bigrams & 2633 & TF-IDF & 0.92 & 0.46 & 0.69 & - & - & - & 0.76 & 0.71 & 0.74 \\
-default & Bigrams & 16165 & TF-IDF & 0.9 & 0.68 & 0.79 & - & - & - & 0.83 & 0.74 & 0.79 \\
-default & Unigrams & 2633 & TF-IDF & 0.85 & 0.52 & 0.74 & - & - & - & 0.81 & 0.79 & 0.8 \\
-default & Unigrams & 16165 & TF-IDF & 0.88 & 0.68 & 0.78 & - & - & - & 0.83 & 0.77 & 0.8 \\
+default & Bigrams & 16165 & TF-IDF & 0.90 & 0.68 & 0.79 & - & - & - & 0.83 & 0.74 & 0.79 \\
+default & Unigrams & 2633 & TF-IDF & 0.85 & 0.52 & 0.74 & - & - & - & 0.81 & 0.79 & 0.80 \\
+default & Unigrams & 16165 & TF-IDF & 0.88 & 0.68 & 0.78 & - & - & - & 0.83 & 0.77 & 0.80 \\
default & Unigrams & maximum & TF-IDF & 0.86 & 0.65 & 0.76 & - & - & - & 0.83 & 0.78 & 0.81 \\
partofspeech & Bigrams & 16165 & TF-IDF & 0.89 & 0.67 & 0.78 & - & - & - & 0.79 & 0.74 & 0.76 \\
partofspeech & Unigrams & 16165 & TF-IDF & 0.89 & 0.63 & 0.76 & - & - & - & 0.81 & 0.78 & 0.79 \\
position & Bigrams & 16165 & TF-IDF & 0.89 & 0.59 & 0.74 & - & - & - & 0.79 & 0.69 & 0.74 \\
position & Unigrams & 16165 & TF-IDF & 0.91 & 0.61 & 0.76 & - & - & - & 0.81 & 0.71 & 0.76 \\
-verbs & Unigrams & maximum & TF-IDF & 0.64 & 0.57 & 0.6 & - & - & - & 0.62 & 0.66 & 0.64 \\
+verbs & Unigrams & maximum & TF-IDF & 0.64 & 0.57 & 0.60 & - & - & - & 0.62 & 0.66 & 0.64 \\
\hline
\end{tabular}
\caption{3-fold cross validation results on movie dataset. Values represent positive, negative, or overall accuracy.}
\end{figure*}
%-------------------------------------------------------------------------
-\subsection{Appendix B}
\begin{figure*}
\begin{tabular}{{|l}*{8}{|c}|r|}
@@ -490,65 +319,6 @@ verbs & Unigrams & maximum & Presence & 0.45 & 0.45 & 0.42 & 0.38 & 0.3
\end{figure*}
%-------------------------------------------------------------------------
-\subsection{References}
-
-List and number all bibliographical references in 9-point Times,
-single-spaced, at the end of your paper. When referenced in the text,
-enclose the citation number in square brackets, for
-example~\cite{Authors06}. Where appropriate, include the name(s) of
-editors of referenced books.
-
-\begin{table}
-\begin{center}
-\begin{tabular}{|l|c|}
-\hline
-Method & Frobnability \\
-\hline\hline
-Theirs & Frumpy \\
-Yours & Frobbly \\
-Ours & Makes one's heart Frob\\
-\hline
-\end{tabular}
-\end{center}
-\caption{Results. Ours is better.}
-\end{table}
-
-%-------------------------------------------------------------------------
-\subsection{Illustrations, graphs, and photographs}
-
-All graphics should be centered. Please ensure that any point you wish to
-make is resolvable in a printed copy of the paper. Resize fonts in figures
-to match the font in the body text, and choose line widths which render
-effectively in print. Many readers (and reviewers), even of an electronic
-copy, will choose to print your paper in order to read it. You cannot
-insist that they do otherwise, and therefore must not assume that they can
-zoom in to see tiny details on a graphic.
-
-When placing figures in \LaTeX, it's almost always best to use
-\verb+\includegraphics+, and to specify the figure width as a multiple of
-the line width as in the example below
-{\small\begin{verbatim}
- \usepackage[dvips]{graphicx} ...
- \includegraphics[width=0.8\linewidth]
- {myfile.eps}
-\end{verbatim}
-}
-
-
-%-------------------------------------------------------------------------
-\subsection{Color}
-
-Color is valuable, and will be visible to readers of the electronic copy.
-However ensure that, when printed on a monochrome printer, no important
-information is lost by the conversion to grayscale.
-
-%------------------------------------------------------------------------
-\section{Final copy}
-
-You must include your signed IEEE copyright release form when you submit
-your finished paper. We MUST have this form before your paper can be
-published in the proceedings.
-
{\small
\bibliographystyle{ieee}
94 fpbib.bib
@@ -0,0 +1,94 @@
+@inproceedings{Gordon,
+author = "G. Gordon, T.Darrell, M. Harville, and J. Woodfill",
+title = {Background estimation and removal based on range and color},
+booktitle = {Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition},
+address = {Fort Collins, Colorado},
+pages = {459--454},
+year = 1999
+}
+
+@inproceedings{Jones,
+author = "D. Jones and J. Malik",
+title = {Determining three-dimensional shape from orientation and spatial frequency disparities},
+booktitle = {Proceeding of ECCV},
+address = {Genoa},
+year = 1992
+}
+
+@article{Martin,
+author = "D. Martin, C. Fowlkes, J. Malik",
+title = {Learning to Detect Natural Image Boundaries Using Local Brightness, Color, and Texture Cues},
+journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
+year = 2004,
+volume = 26,
+number = 5,
+pages = {530--549}
+}
+
+@inproceedings{McIvor,
+author = "A. McIvor",
+title = {Background subtraction techniques},
+booktitle = {Proceedings of Image \& Vision Computing New Zealand 2000 IVCNZ’00},
+address = {Auckland, New Zealand},
+year = 2000
+}
+
+@inproceedings{Scott,
+author = "G. Scott and H Longuet-Higgins",
+title = {Feature grouping by relocalisation of eigenvectors of the proximity matrix},
+booktitle = {Proceeding of British Machine Vision Conference},
+pages = {103--108},
+year = 1990
+}
+
+@article{Seitz,
+author = "P. Seitz",
+title = {Using local orientation information as image primitive
+for robust object recognition},
+journal = {SPIE Visual Communications and Image Processing IV},
+pages = {1630--1639},
+volume = {1199},
+number = 1,
+year = 1989
+}
+
+@electronic{Vance,
+ author = "A. Vance",
+ title = "Microsoft's Ambivalence About Kinect Hackers",
+ note = {http://www.businessweek.com/magazine/content/11\_04/b4212028870272.htm},
+ month = jan,
+ year = "2011"
+}
+
+@article{Wren,
+author = "C. Wren and Y. Ivanov",
+title = {Volumetric Operations with Surface Margins},
+journal = {IEEE Computer Vision and Pattern Recognition Technical Sketches},
+year = 2002
+}
+
+@article{Zabih,
+ author = "R. Zabih and J. Woodfill",
+ title = {Non-parametric local transforms for computing visual correspondence},
+ journal = {Lecture Notes in Computer Science 800},
+ year = 1994,
+ pages = {151-158}
+}
+
+@inproceedings{Zhang,
+ author = "L. Zhang, B. Curless, and S. M. Seitz",
+ title = "Rapid Shape Acquisition Using Color
+Structured Light and Multi-pass Dynamic Programming",
+ intype = "presented at the",
+ booktitle = "Proceedings of the 1st
+International Symposium on 3D Data Processing, Visualization, and Transmission (3DPVT)",
+ address = "Padova, Italy",
+ year = "2002",
+ pages = "24-36",
+}
+
+@misc{depthmap,
+ title = {Screenshot.png},
+ note = {http://www.vislab.usyd.edu.au/blogs/media/blogs/baz/Screenshot.png},
+}
+