
Removed deprecated code, since 2.0 is not backwards compatible. Resolves issue 558.

svn/trunk@8778
1 parent 6323bab · commit 0c2984e7989959525a6a37891139d971eb474733 · stevenbird committed Apr 10, 2011
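Every deletion below follows the same shape: a stub class that pairs the old name with the nltk.internals.Deprecated mix-in, so the old name kept working while pointing users at the new one. A minimal sketch of that pattern, assuming the mix-in simply emits a DeprecationWarning on instantiation (the real nltk.internals.Deprecated may differ in detail):

    import warnings

    class Deprecated(object):
        # Sketch of the mix-in only; nltk.internals.Deprecated is the real thing.
        def __new__(cls, *args, **kwargs):
            hint = (cls.__doc__ or "").strip()
            warnings.warn("%s is deprecated. %s" % (cls.__name__, hint),
                          DeprecationWarning, stacklevel=2)
            return object.__new__(cls)

    class RegexpChunkParser(object):
        """Stand-in for the real nltk.RegexpChunkParser."""

    class RegexpChunk(RegexpChunkParser, Deprecated):
        """Use nltk.RegexpChunkParser instead."""

    RegexpChunk()   # emits a DeprecationWarning pointing at nltk.RegexpChunkParser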
@@ -193,13 +193,3 @@ def batch_ne_chunk(tagged_sentences, binary=False):
chunker = nltk.data.load(chunker_pickle)
return chunker.batch_parse(tagged_sentences)
-######################################################################
-#{ Deprecated
-######################################################################
-from nltk.internals import Deprecated
-class ChunkParseI(ChunkParserI, Deprecated):
- """Use nltk.ChunkParserI instead."""
-class RegexpChunk(RegexpChunkParser, Deprecated):
- """Use nltk.RegexpChunkParser instead."""
-class Regexp(RegexpParser, Deprecated):
- """Use nltk.RegexpParser instead."""
@@ -113,6 +113,6 @@
import numpy
from maxent import *
__all__ += ['MaxentClassifier', 'BinaryMaxentFeatureEncoding',
- 'ConditionalExponentialClassifier', 'train_maxent_classifier']
+ 'ConditionalExponentialClassifier']
except ImportError:
pass
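Only the train_maxent_classifier name is dropped from the classify package's exports; MaxentClassifier itself survives. Presumably callers should train through the class method instead, as sketched below (the toy feature sets are invented, and max_iter is one of the optional cutoff keywords):

    from nltk.classify import MaxentClassifier

    train = [({"last_letter": "a"}, "female"),     # invented toy data
             ({"last_letter": "k"}, "male"),
             ({"last_letter": "e"}, "female")]
    classifier = MaxentClassifier.train(train, max_iter=5)
    print classifier.classify({"last_letter": "a"})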
@@ -21,7 +21,7 @@
__all__ = ['ConfusionMatrix', 'accuracy',
'f_measure', 'log_likelihood', 'precision', 'recall',
- 'approxrand', 'edit_distance', 'edit_dist', 'windowdiff',
+ 'approxrand', 'edit_distance', 'windowdiff',
'AnnotationTask', 'spearman_correlation',
'ranks_from_sequence', 'ranks_from_scores',
'NgramAssocMeasures', 'BigramAssocMeasures',
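Here only the edit_dist alias disappears from the metrics exports; edit_distance remains the canonical name, so usage is unchanged apart from the rename:

    from nltk.metrics import edit_distance

    print edit_distance("kitten", "sitting")   # Levenshtein distance: 3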
@@ -94,46 +94,3 @@
'NonprojectiveDependencyParser', 'MaltParser',
]
-######################################################################
-#{ Deprecated
-######################################################################
-from nltk.internals import Deprecated
-class ParseI(ParserI, Deprecated):
- """Use nltk.ParserI instead."""
-class AbstractParse(AbstractParser, Deprecated):
- """Use nltk.ParserI instead."""
-class RecursiveDescent(RecursiveDescentParser, Deprecated):
- """Use nltk.RecursiveDescentParser instead."""
-class SteppingRecursiveDescent(SteppingRecursiveDescentParser, Deprecated):
- """Use nltk.SteppingRecursiveDescentParser instead."""
-class ShiftReduce(ShiftReduceParser, Deprecated):
- """Use nltk.ShiftReduceParser instead."""
-class SteppingShiftReduce(SteppingShiftReduceParser, Deprecated):
- """Use nltk.SteppingShiftReduceParser instead."""
-class EarleyChartParse(EarleyChartParser, Deprecated):
- """Use nltk.EarleyChartParser instead."""
-class FeatureEarleyChartParse(FeatureEarleyChartParser, Deprecated):
- """Use nltk.FeatureEarleyChartParser instead."""
-class ChartParse(ChartParser, Deprecated):
- """Use nltk.ChartParser instead."""
-class SteppingChartParse(SteppingChartParser, Deprecated):
- """Use nltk.SteppingChartParser instead."""
-class BottomUpChartParse(BottomUpProbabilisticChartParser, Deprecated):
- """Use nltk.BottomUpProbabilisticChartParser instead."""
-class InsideParse(InsideChartParser, Deprecated):
- """Use nltk.InsideChartParser instead."""
-class RandomParse(RandomChartParser, Deprecated):
- """Use nltk.RandomChartParser instead."""
-class UnsortedParse(UnsortedChartParser, Deprecated):
- """Use nltk.UnsortedChartParser instead."""
-class LongestParse(LongestChartParser, Deprecated):
- """Use nltk.LongestChartParser instead."""
-class ViterbiParse(ViterbiParser, Deprecated):
- """Use nltk.ViterbiParser instead."""
-class GrammarFile(Deprecated):
- """Use nltk.data.load() instead."""
- # [xx] had directives: %start, %kimmo, %tagger_file?
- def __init__(self, filename=None, verbose=False):
- raise ValueError("GrammarFile is no longer supported -- "
- "use nltk.data.load() instead.")
-
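This is the largest deletion: the old ParseI/ChartParse-style aliases and the defunct GrammarFile wrapper, whose own error message already redirects users to nltk.data.load(). A hedged sketch of the replacement; the grammar path below is a placeholder, and the example assumes the grammar covers the sentence:

    import nltk

    # Where code once went through GrammarFile, load the grammar directly:
    grammar = nltk.data.load("grammars/sample_grammars/toy.cfg")   # placeholder path
    parser = nltk.ChartParser(grammar)                             # was: nltk.parse.ChartParse(...)
    for tree in parser.nbest_parse("the dog barked".split()):
        print tree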
@@ -1076,9 +1076,6 @@ def apply_iter(self, chart, grammar):
if chart.insert(new_edge, ()):
yield new_edge
-class CachedTopDownInitRule(TopDownInitRule, Deprecated):
- """Use L{TopDownInitRule} instead."""
-
class TopDownPredictRule(AbstractChartRule):
"""
A rule licensing edges corresponding to the grammar productions
@@ -1099,9 +1096,6 @@ def apply_iter(self, chart, grammar, edge):
if chart.insert(new_edge, ()):
yield new_edge
-class TopDownExpandRule(TopDownPredictRule, Deprecated):
- """Use TopDownPredictRule instead"""
-
class CachedTopDownPredictRule(TopDownPredictRule):
"""
A cached version of L{TopDownPredictRule}. After the first time
@@ -1142,9 +1136,6 @@ def apply_iter(self, chart, grammar, edge):
# Record the fact that we've applied this rule.
self._done[next, index] = (chart, grammar)
-class CachedTopDownExpandRule(CachedTopDownPredictRule, Deprecated):
- """Use L{CachedTopDownPredictRule} instead."""
-
#////////////////////////////////////////////////////////////
# Bottom-Up Prediction
#////////////////////////////////////////////////////////////
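The three chart-rule deletions above are straight renames, per their docstrings: CachedTopDownInitRule becomes TopDownInitRule, TopDownExpandRule becomes TopDownPredictRule, and CachedTopDownExpandRule becomes CachedTopDownPredictRule. Code that assembles a custom parsing strategy only needs its imports updated; the commented ChartParser call is an assumption about the constructor of this era, not shown in the diff:

    # Before: from nltk.parse.chart import TopDownExpandRule, CachedTopDownExpandRule
    from nltk.parse.chart import (TopDownInitRule, TopDownPredictRule,
                                  CachedTopDownPredictRule)

    # e.g. parser = nltk.ChartParser(grammar, [TopDownInitRule(), CachedTopDownPredictRule(), ...])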
@@ -28,7 +28,6 @@
'Valuation', 'Assignment', 'Model', 'Undefined', 'is_rel', 'set2rel', 'arity',
# utility methods
- 'text_parse', 'text_interpret', 'text_evaluate',
'batch_parse', 'batch_interpret', 'batch_evaluate',
'root_semrep',
'parse_valuation_line', 'parse_valuation', 'parse_logic', 'skolemize',
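In the semantics package only the text_parse/text_interpret/text_evaluate names are dropped, while batch_parse/batch_interpret/batch_evaluate stay exported; the parallel naming suggests the batch_* functions are the surviving counterparts (an assumption, since the diff shows only the export list). A sketch of the presumed replacement, with a placeholder grammar path and a toy sentence:

    from nltk.sem import batch_interpret

    sents = ["Irene walks"]                                      # toy input sentence
    grammar_file = "grammars/book_grammars/simple-sem.fcfg"      # placeholder path
    readings = batch_interpret(sents, grammar_file)              # one result list per sentence
    for (syntree, semrep) in readings[0]:
        print semrep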
@@ -33,23 +33,7 @@
# Stemmers
'RegexpStemmer', 'PorterStemmer', 'LancasterStemmer',
- 'RSLPStemmer', 'WordNetLemmatizer', 'WordnetStemmer',
+ 'RSLPStemmer', 'WordNetLemmatizer',
'ISRIStemmer', 'SnowballStemmer'
]
-######################################################################
-#{ Deprecated
-######################################################################
-from nltk.internals import Deprecated
-class StemI(StemmerI, Deprecated):
- """Use nltk.StemmerI instead."""
-class Regexp(RegexpStemmer, Deprecated):
- """Use nltk.RegexpStemmer instead."""
-class Porter(PorterStemmer, Deprecated):
- """Use nltk.PorterStemmer instead."""
-class Lancaster(LancasterStemmer, Deprecated):
- """Use nltk.LancasterStemmer instead."""
-class Wordnet(WordNetStemmer, Deprecated):
- """Use nltk.WordNetLemmatizer instead."""
-
-
@@ -8,8 +8,6 @@
from nltk.corpus.reader.wordnet import NOUN
from nltk.corpus import wordnet as _wordnet
-from nltk.internals import Deprecated
-
class WordNetLemmatizer(object):
"""
@@ -40,17 +38,3 @@ def __repr__(self):
print 'aardwolves ->', wnl.lemmatize('aardwolves')
print 'abaci ->', wnl.lemmatize('abaci')
print 'hardrock ->', wnl.lemmatize('hardrock')
-
-
-class WordnetStemmer(Deprecated, WordNetLemmatizer):
- """Use WordNetLemmatizer instead."""
-
- def __init__(self):
- WordNetLemmatizer.__init__(self)
-
-
-class WordNetStemmer(Deprecated, WordNetLemmatizer):
- """Use WordNetLemmatizer instead."""
-
- def __init__(self):
- WordNetLemmatizer.__init__(self)
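Both removed stemmer shims (WordnetStemmer and WordNetStemmer) were empty subclasses of WordNetLemmatizer, so migration is just a rename; the demo lines kept above already show the replacement in use:

    from nltk.stem import WordNetLemmatizer

    wnl = WordNetLemmatizer()          # was: WordnetStemmer() / WordNetStemmer()
    print 'aardwolves ->', wnl.lemmatize('aardwolves')   # aardwolf
    print 'abaci ->', wnl.lemmatize('abaci')             # abacus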
@@ -20,7 +20,7 @@
__all__ = ['WhitespaceTokenizer', 'SpaceTokenizer', 'TabTokenizer',
'LineTokenizer', 'RegexpTokenizer', 'BlanklineTokenizer',
- 'WordPunctTokenizer', 'WordTokenizer', 'blankline_tokenize',
+ 'WordPunctTokenizer', 'blankline_tokenize',
'wordpunct_tokenize', 'regexp_tokenize', 'word_tokenize',
'SExprTokenizer', 'sexpr_tokenize', 'line_tokenize',
'PunktWordTokenizer', 'PunktSentenceTokenizer',
@@ -15,7 +15,7 @@
import re
import sre_constants
-from nltk.internals import convert_regexp_to_nongrouping, Deprecated
+from nltk.internals import convert_regexp_to_nongrouping
from api import *
from util import *
@@ -151,20 +151,6 @@ class WordPunctTokenizer(RegexpTokenizer):
def __init__(self):
RegexpTokenizer.__init__(self, r'\w+|[^\w\s]+')
-class WordTokenizer(RegexpTokenizer, Deprecated):
- """
- B{If you want to tokenize words, you should probably use
- TreebankWordTokenizer or word_tokenize() instead.}
-
- A tokenizer that divides a text into sequences of alphabetic
- characters. Any non-alphabetic characters are discarded. E.g.:
-
- >>> WordTokenizer().tokenize("She said 'hello'.")
- ['She', 'said', 'hello']
- """
- def __init__(self):
- RegexpTokenizer.__init__(self, r'\w+')
-
######################################################################
#{ Tokenization Functions
######################################################################
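The deleted WordTokenizer was nothing more than RegexpTokenizer(r'\w+'), and its docstring already pointed readers at TreebankWordTokenizer / word_tokenize(). Either replacement is a one-liner; the example string comes from the removed docstring:

    from nltk.tokenize import RegexpTokenizer, word_tokenize

    text = "She said 'hello'."
    print RegexpTokenizer(r'\w+').tokenize(text)   # ['She', 'said', 'hello'] -- exact old behaviour
    print word_tokenize(text)                      # recommended replacement; keeps punctuation tokens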
@@ -17,7 +17,7 @@
from pprint import pprint
from nltk.compat import defaultdict
-from nltk.internals import Deprecated, slice_bounds
+from nltk.internals import slice_bounds
######################################################################
# Short usage message
@@ -969,12 +969,6 @@ def __len__(self):
return max(len(lst) for lst in self._lists)
-class LazyMappedList(Deprecated, LazyMap):
- """Use LazyMap instead."""
- def __init__(self, lst, func):
- LazyMap.__init__(self, func, lst)
-
-
class LazyZip(LazyMap):
"""
A lazy sequence whose elements are tuples, each containing the i-th
@@ -1050,11 +1044,6 @@ def __init__(self, lst):
LazyZip.__init__(self, xrange(len(lst)), lst)
-class LazyMappedChain(Deprecated, LazyConcatenation):
- """Use LazyConcatenation(LazyMap(func, lists)) instead."""
- def __init__(self, lst, func):
- LazyConcatenation.__init__(self, LazyMap(func, lst))
-
######################################################################
# Binary Search in a File
######################################################################
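Finally, the two lazy-sequence wrappers were thin argument reorderings, as their bodies show: LazyMappedList(lst, func) is LazyMap(func, lst), and LazyMappedChain(lst, func) is LazyConcatenation(LazyMap(func, lst)). A small usage sketch of the surviving classes:

    from nltk.util import LazyMap, LazyConcatenation

    words = [["a", "cat"], ["sat", "down"]]
    lengths = LazyMap(len, words)                      # was: LazyMappedList(words, len)
    flat = LazyConcatenation(LazyMap(list, words))     # was: LazyMappedChain(words, list)
    print list(lengths), list(flat)                    # [2, 2] ['a', 'cat', 'sat', 'down']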
