Commit

Removed deprecated code, since 2.0 is not backwards compatible. Resolves issue 558.

svn/trunk@8777
stevenbird committed Apr 10, 2011
1 parent a44aae4 commit 6323bab
Showing 36 changed files with 21 additions and 487 deletions.
20 changes: 2 additions & 18 deletions nltk/classify/api.py
@@ -24,7 +24,7 @@
- The number of categories is finite.
- Each text belongs to zero or more categories.
"""
from nltk.internals import deprecated, overridden
from nltk.internals import overridden

##//////////////////////////////////////////////////////
#{ Classification Interfaces
@@ -92,14 +92,6 @@ def batch_prob_classify(self, featuresets):
"""
return [self.prob_classify(fs) for fs in featuresets]

#{ Deprecated
@deprecated("Use .batch_prob_classify() instead.")
def batch_probdist(self, featuresets):
return self.batch_prob_classify(featuresets)
@deprecated("Use .prob_classify() instead.")
def probdist(self, featureset):
return self.prob_classify(featureset)
#}

class MultiClassifierI(object):
"""
@@ -163,15 +155,7 @@ def batch_prob_classify(self, featuresets):
"""
return [self.prob_classify(fs) for fs in featuresets]

#{ Deprecated
@deprecated("Use .batch_prob_classify() instead.")
def batch_probdist(self, featuresets):
return self.batch_prob_classify(featuresets)
@deprecated("Use .prob_classify() instead.")
def probdist(self, featureset):
return self.prob_classify(featureset)
#}


# # [XX] IN PROGRESS:
# class SequenceClassifierI(object):
# """
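The removed probdist() and batch_probdist() shims simply forwarded to the methods named in their @deprecated messages, so callers only need to rename the call. A minimal migration sketch (illustrative, not part of this commit; the toy NaiveBayesClassifier training data is made up):

    from nltk.classify import NaiveBayesClassifier

    # Toy training data, purely for illustration.
    train_set = [
        ({'last_letter': 'a'}, 'female'),
        ({'last_letter': 'k'}, 'male'),
        ({'last_letter': 'e'}, 'female'),
        ({'last_letter': 'o'}, 'male'),
    ]
    classifier = NaiveBayesClassifier.train(train_set)

    # Pre-2.0: classifier.probdist({'last_letter': 'a'})
    dist = classifier.prob_classify({'last_letter': 'a'})
    print(dist.max())

    # Pre-2.0: classifier.batch_probdist(featuresets)
    featuresets = [fs for (fs, label) in train_set]
    dists = classifier.batch_prob_classify(featuresets)
    print([d.max() for d in dists])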
3 changes: 0 additions & 3 deletions nltk/classify/maxent.py
@@ -342,9 +342,6 @@ def train(cls, train_toks, algorithm=None, trace=3, encoding=None,
#: Alias for MaxentClassifier.
ConditionalExponentialClassifier = MaxentClassifier

@deprecated('Use MaxentClassifier.train() instead')
def train_maxent_classifier(*args, **kwargs):
return MaxentClassifier.train(*args, **kwargs)

######################################################################
#{ Feature Encodings
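The module-level train_maxent_classifier() wrapper is gone as well; its replacement is the classmethod it forwarded to. A short sketch (the feature sets below are hypothetical):

    from nltk.classify.maxent import MaxentClassifier

    # Hypothetical training data.
    train_toks = [
        ({'contains(good)': True}, 'pos'),
        ({'contains(bad)': True}, 'neg'),
    ]

    # Pre-2.0: train_maxent_classifier(train_toks, algorithm='iis')
    classifier = MaxentClassifier.train(train_toks, algorithm='iis', trace=0)
    print(classifier.classify({'contains(good)': True}))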
6 changes: 0 additions & 6 deletions nltk/cluster/gaac.py
@@ -7,8 +7,6 @@

import numpy

from nltk.internals import deprecated

from api import *
from util import *

@@ -97,10 +95,6 @@ def classify_vectorspace(self, vector):
best = (sim, i)
return best[1]

@deprecated("Use GAAClusterer.dendrogram instead.")
def dendogram(self):
return dendrogram(self)

def dendrogram(self):
"""
@return: The dendrogram representing the current clustering
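In nltk.cluster, only the misspelled dendogram() alias goes away; the correctly spelled dendrogram() remains. A quick sketch, using vectors along the lines of the module's own demo:

    import numpy
    from nltk.cluster import GAAClusterer

    vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0]]]
    clusterer = GAAClusterer(2)
    clusterer.cluster(vectors, True)

    # Pre-2.0: clusterer.dendogram()   (note the missing 'r')
    clusterer.dendrogram().show()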
31 changes: 0 additions & 31 deletions nltk/corpus/reader/api.py
@@ -14,7 +14,6 @@
import re

from nltk.compat import defaultdict
from nltk.internals import deprecated
from nltk.data import PathPointer, FileSystemPathPointer, ZipFilePathPointer
from nltk.sourcedstring import SourcedStringStream

@@ -212,16 +211,6 @@ def _get_root(self): return self._root
@type: L{PathPointer}""")

#{ Deprecated since 0.9.7
@deprecated("Use corpus.fileids() instead")
def files(self): return self.fileids()
#}

#{ Deprecated since 0.9.1
@deprecated("Use corpus.fileids() instead")
def _get_items(self): return self.fileids()
items = property(_get_items)
#}

######################################################################
#{ Corpora containing categorized items
@@ -439,23 +428,3 @@ def _read_parsed_sent_block(self, stream):
#} End of Block Readers
#------------------------------------------------------------

#{ Deprecated since 0.8
@deprecated("Use .raw() or .sents() or .tagged_sents() or "
".parsed_sents() instead.")
def read(self, items=None, format='parsed'):
if format == 'parsed': return self.parsed_sents(items)
if format == 'raw': return self.raw(items)
if format == 'tokenized': return self.sents(items)
if format == 'tagged': return self.tagged_sents(items)
raise ValueError('bad format %r' % format)
@deprecated("Use .parsed_sents() instead.")
def parsed(self, items=None):
return self.parsed_sents(items)
@deprecated("Use .sents() instead.")
def tokenized(self, items=None):
return self.sents(items)
@deprecated("Use .tagged_sents() instead.")
def tagged(self, items=None):
return self.tagged_sents(items)
#}
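Across the corpus readers, the removed files() method, the items property, and the format-dispatching read()/tagged()/tokenized()/parsed() wrappers all forwarded to explicit accessors, so the migration is mechanical. A sketch against the bundled treebank sample (assumes the corpus data has been downloaded; the fileid is illustrative):

    from nltk.corpus import treebank

    # Pre-2.0: treebank.files()  or  treebank.items
    print(treebank.fileids()[:3])

    # Pre-2.0: treebank.read('wsj_0001.mrg', format='tagged')  or  treebank.tagged('wsj_0001.mrg')
    print(treebank.tagged_sents('wsj_0001.mrg')[0])

    # Pre-2.0: treebank.read('wsj_0001.mrg', format='parsed')  or  treebank.parsed('wsj_0001.mrg')
    print(treebank.parsed_sents('wsj_0001.mrg')[0])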

13 changes: 0 additions & 13 deletions nltk/corpus/reader/cmudict.py
@@ -47,7 +47,6 @@

import codecs

from nltk.internals import deprecated
from nltk.util import Index

from util import *
@@ -84,18 +83,6 @@ def dict(self):
"""
return dict(Index(self.entries()))

#{ Deprecated since 0.8
@deprecated("Use .entries() or .transcriptions() instead.")
def read(self, items='cmudict', format='listed'):
if format == 'listed': return self.entries(items)
if format == 'dictionary': return self.dict(items)
raise ValueError('bad format %r' % format)
@deprecated("Use .dict() instead.")
def dictionary(self, items='cmudict'): return self.dict(items)
@deprecated("Use .entries() instead.")
def listed(self, items='cmudict'): return self.entries(items)
#}

def read_cmudict_block(stream):
entries = []
while len(entries) < 100: # Read 100 at a time.
23 changes: 0 additions & 23 deletions nltk/corpus/reader/conll.py
@@ -14,7 +14,6 @@
import codecs
import textwrap

from nltk.internals import deprecated
from nltk.tree import Tree
from nltk.util import LazyMap, LazyConcatenation

@@ -367,28 +366,6 @@ def _get_column(grid, column_index):
return [grid[i][column_index] for i in range(len(grid))]


#/////////////////////////////////////////////////////////////////
#{ Deprecated since 0.8
#/////////////////////////////////////////////////////////////////
@deprecated("Use .raw() or .words() or .tagged_words() or "
".chunked_sents() instead.")
def read(self, items, format='chunked', chunk_types=None):
if format == 'chunked': return self.chunked_sents(items, chunk_types)
if format == 'raw': return self.raw(items)
if format == 'tokenized': return self.words(items)
if format == 'tagged': return self.tagged_words(items)
raise ValueError('bad format %r' % format)
@deprecated("Use .chunked_sents() instead.")
def chunked(self, items, chunk_types=None):
return self.chunked_sents(items, chunk_types)
@deprecated("Use .words() instead.")
def tokenized(self, items):
return self.words(items)
@deprecated("Use .tagged_words() instead.")
def tagged(self, items):
return self.tagged_words(items)
#}

class ConllSRLInstance(object):
"""
An SRL instance from a CoNLL corpus, which identifies and
13 changes: 0 additions & 13 deletions nltk/corpus/reader/ieer.py
@@ -24,7 +24,6 @@
import codecs

import nltk
from nltk.internals import deprecated

from api import *
from util import *
@@ -110,15 +109,3 @@ def _read_block(self, stream):
# Return the document
return ['\n'.join(out)]

#{ Deprecated since 0.8
@deprecated("Use .parsed_docs() or .raw() or .docs() instead.")
def read(self, items, format='parsed'):
if format == 'parsed': return self.parsed_docs(items)
if format == 'raw': return self.raw(items)
if format == 'docs': return self.docs(items)
raise ValueError('bad format %r' % format)
@deprecated("Use .parsed_docs() instead.")
def parsed(self, items):
return self.parsed_docs(items)
#}

17 changes: 1 addition & 16 deletions nltk/corpus/reader/indian.py
@@ -20,7 +20,6 @@

import codecs

from nltk.internals import deprecated
from nltk.tag.util import str2tuple

from util import *
@@ -63,21 +62,7 @@ def raw(self, fileids=None):
elif isinstance(fileids, basestring): fileids = [fileids]
return concat([self.open(f).read() for f in fileids])

#{ Deprecated since 0.8
@deprecated("Use .raw() or .words() or .tagged_words() instead.")
def read(self, items, format='tagged'):
if format == 'raw': return self.raw(items)
if format == 'tokenized': return self.words(items)
if format == 'tagged': return self.tagged_words(items)
raise ValueError('bad format %r' % format)
@deprecated("Use .words() instead.")
def tokenized(self, items):
return self.words(items)
@deprecated("Use .tagged_words() instead.")
def tagged(self, items):
return self.tagged_words(items)
#}


class IndianCorpusView(StreamBackedCorpusView):
def __init__(self, corpus_file, encoding, tagged,
group_by_sent, tag_mapping_function=None):
6 changes: 0 additions & 6 deletions nltk/corpus/reader/pl196x.py
@@ -8,16 +8,12 @@
import os
import re


from nltk import tokenize, tree
from nltk.internals import deprecated


from util import *
from api import *
from xmldocs import XMLCorpusReader


# (?:something) -- non-grouping parentheses!

PARA = re.compile(r'<p(?: [^>]*){0,1}>(.*?)</p>')
@@ -32,8 +28,6 @@
TEXTID = re.compile(r'text id="(.*?)"')




class TEICorpusView(StreamBackedCorpusView):
def __init__(self, corpus_file,
tagged, group_by_sent, group_by_para,
11 changes: 0 additions & 11 deletions nltk/corpus/reader/plaintext.py
@@ -15,7 +15,6 @@

import nltk.data
from nltk.tokenize import *
from nltk.internals import deprecated

from util import *
from api import *
@@ -153,16 +152,6 @@ def _read_para_block(self, stream):
for sent in self._sent_tokenizer.tokenize(para)])
return paras

#{ Deprecated since 0.8
@deprecated("Use .raw() or .words() instead.")
def read(self, items=None, format='tokenized'):
if format == 'raw': return self.raw(items)
if format == 'tokenized': return self.words(items)
raise ValueError('bad format %r' % format)
@deprecated("Use .words() instead.")
def tokenized(self, items=None):
return self.words(items)
#}

class CategorizedPlaintextCorpusReader(CategorizedCorpusReader,
PlaintextCorpusReader):
9 changes: 0 additions & 9 deletions nltk/corpus/reader/ppattach.py
@@ -40,8 +40,6 @@

import codecs

from nltk.internals import deprecated

from util import *
from api import *

@@ -93,10 +91,3 @@ def _read_obj_block(self, stream):
else:
return []

#{ Deprecated since 0.8
@deprecated("Use .tuples() or .raw() or .attachments() instead.")
def read(self, items, format='tuple'):
if format == 'tuple': return self.tuples(items)
if format == 'raw': return self.raw(items)
raise ValueError('bad format %r' % format)
#}
13 changes: 1 addition & 12 deletions nltk/corpus/reader/senseval.py
@@ -29,7 +29,6 @@

from nltk.tokenize import *
from nltk.etree import ElementTree
from nltk.internals import deprecated

from util import *
from api import *
@@ -68,17 +67,7 @@ def _entry(self, tree):
elts.append( (sense, context) )
return elts

#{ Deprecated since 0.8
@deprecated("Use .instances() or .raw() instead.")
def read(self, items, format='listed'):
if format == 'listed': return self.instances(items)
if format == 'raw': return self.raw(items)
raise ValueError('bad format %r' % format)
@deprecated("Use .instances() instead.")
def listed(self, items):
return self.instances(items)
#}


class SensevalCorpusView(StreamBackedCorpusView):
def __init__(self, fileid, encoding):
StreamBackedCorpusView.__init__(self, fileid, encoding=encoding)
1 change: 0 additions & 1 deletion nltk/corpus/reader/sinica_treebank.py
@@ -42,7 +42,6 @@
import re

import nltk
from nltk.internals import deprecated

from util import *
from api import *
1 change: 0 additions & 1 deletion nltk/corpus/reader/tagged.py
@@ -15,7 +15,6 @@

from nltk.tag import str2tuple
from nltk.tokenize import *
from nltk.internals import deprecated

from api import *
from util import *
29 changes: 1 addition & 28 deletions nltk/corpus/reader/timit.py
@@ -126,7 +126,7 @@
import time

from nltk.tree import Tree
from nltk.internals import deprecated, import_from_stdlib
from nltk.internals import import_from_stdlib

from util import *
from api import *
@@ -414,33 +414,6 @@ def play(self, utterance, start=0, end=None):
print >>sys.stderr, ("you must install pygame or ossaudiodev "
"for audio playback.")

#{ Deprecated since 0.9.7
@deprecated("Use corpus.fileids() instead")
def files(self, filetype=None): return self.fileids(filetype)
@deprecated("Use corpus.utteranceids() instead")
def utterances(self, dialect=None, sex=None, spkrid=None,
sent_type=None, sentid=None):
return self.utteranceids(dialect, sex, spkrid, sent_type, sentid)
@deprecated("Use corpus.spkrutteranceids() instead")
def spkrutterances(self, speaker): return self.utteranceids(speaker)
#}

#{ Deprecated since 0.9.1
@deprecated("Use utteranceids(spkrid=...) instead.")
def spkritems(self, spkrid):
return self.utteranceids(spkrid=spkrid)
#}

#{ Deprecated since 0.8
@deprecated("Use .sents() or .sent_times() instead.")
def tokenized(self, utterances=None, offset=True):
if offset: return self.sent_times(utterances)
else: return self.sents(utterances)
@deprecated("Use .phones() or .phone_times() instead.")
def phonetic(self, utterances=None, offset=True):
if offset: return self.phone_times(utterances)
else: return self.phones(utterances)
#}

class SpeakerInfo:
def __init__(self, id, sex, dr, use, recdate, birthdate,
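The TIMIT reader follows the same pattern: files() becomes fileids(), utterances() becomes utteranceids(), spkrutterances() becomes spkrutteranceids(), and the tokenized()/phonetic() wrappers give way to the explicit sents()/sent_times() and phones()/phone_times() accessors. A brief sketch against the bundled TIMIT sample (assumes the corpus data has been downloaded):

    from nltk.corpus import timit

    # Pre-2.0: timit.files()      ->  timit.fileids()
    # Pre-2.0: timit.utterances() ->  timit.utteranceids()
    print(timit.fileids()[:3])
    utt = timit.utteranceids()[0]

    # Pre-2.0: timit.tokenized(utt, offset=False)
    print(timit.sents(utt)[0])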
