You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
I've been getting this error recently. I have used nltk before without any problem. Why is this happening?
I am using jupyter (python3) notebook. However, if I import nltk on terminal, there is no error.
TypeError Traceback (most recent call last)
<ipython-input-1-53ff8bda7639> in <module>()
1 import numpy as np
2 import pandas as pd
----> 3 from nltk.tokenize import word_tokenize
/home/parth/.local/lib/python2.7/site-packages/nltk/__init__.pyc in <module>()
141 ###########################################################
142
--> 143 from nltk.chunk import *
144 from nltk.classify import *
145 from nltk.inference import *
/home/parth/.local/lib/python2.7/site-packages/nltk/chunk/__init__.py in <module>()
155 from nltk.data import load
156
--> 157 from nltk.chunk.api import ChunkParserI
158 from nltk.chunk.util import (
159 ChunkScore,
/home/parth/.local/lib/python2.7/site-packages/nltk/chunk/api.py in <module>()
11 ##//////////////////////////////////////////////////////
12
---> 13 from nltk.parse import ParserI
14
15 from nltk.chunk.util import ChunkScore
/home/parth/.local/lib/python2.7/site-packages/nltk/parse/__init__.py in <module>()
98 from nltk.parse.malt import MaltParser
99 from nltk.parse.evaluate import DependencyEvaluator
--> 100 from nltk.parse.transitionparser import TransitionParser
101 from nltk.parse.bllip import BllipParser
102 from nltk.parse.corenlp import CoreNLPParser, CoreNLPDependencyParser
/home/parth/.local/lib/python2.7/site-packages/nltk/parse/transitionparser.py in <module>()
20 from numpy import array
21 from scipy import sparse
---> 22 from sklearn.datasets import load_svmlight_file
23 from sklearn import svm
24 except ImportError:
/home/parth/.local/lib/python2.7/site-packages/sklearn/datasets/__init__.py in <module>()
21 from .lfw import fetch_lfw_pairs
22 from .lfw import fetch_lfw_people
---> 23 from .twenty_newsgroups import fetch_20newsgroups
24 from .twenty_newsgroups import fetch_20newsgroups_vectorized
25 from .mldata import fetch_mldata, mldata_filename
/home/parth/.local/lib/python2.7/site-packages/sklearn/datasets/twenty_newsgroups.py in <module>()
42 from .base import _fetch_remote
43 from .base import RemoteFileMetadata
---> 44 from ..feature_extraction.text import CountVectorizer
45 from ..preprocessing import normalize
46 from ..utils import deprecated
/home/parth/.local/lib/python2.7/site-packages/sklearn/feature_extraction/__init__.py in <module>()
8 from .hashing import FeatureHasher
9 from .image import img_to_graph, grid_to_graph
---> 10 from . import text
11
12 __all__ = ['DictVectorizer', 'image', 'img_to_graph', 'grid_to_graph', 'text',
/home/parth/.local/lib/python2.7/site-packages/sklearn/feature_extraction/text.py in <module>()
28 from ..externals import six
29 from ..externals.six.moves import xrange
---> 30 from ..preprocessing import normalize
31 from .hashing import FeatureHasher
32 from .stop_words import ENGLISH_STOP_WORDS
/home/parth/.local/lib/python2.7/site-packages/sklearn/preprocessing/__init__.py in <module>()
4 """
5
----> 6 from ._function_transformer import FunctionTransformer
7
8 from .data import Binarizer
/home/parth/.local/lib/python2.7/site-packages/sklearn/preprocessing/_function_transformer.py in <module>()
3 from ..base import BaseEstimator, TransformerMixin
4 from ..utils import check_array
----> 5 from ..utils.testing import assert_allclose_dense_sparse
6 from ..externals.six import string_types
7
/home/parth/.local/lib/python2.7/site-packages/sklearn/utils/testing.py in <module>()
749
750 try:
--> 751 import pytest
752
753 skip_if_32bit = pytest.mark.skipif(_IS_32BIT,
/usr/lib/python2.7/dist-packages/pytest.py in <module>()
11 hookspec, hookimpl
12 )
---> 13 from _pytest.fixtures import fixture, yield_fixture
14 from _pytest.assertion import register_assert_rewrite
15 from _pytest.freeze_support import freeze_includes
/usr/lib/python2.7/dist-packages/_pytest/fixtures.py in <module>()
840
841 @attr.s(frozen=True)
--> 842 class FixtureFunctionMarker(object):
843 scope = attr.ib()
844 params = attr.ib(convert=attr.converters.optional(tuple))
/usr/lib/python2.7/dist-packages/_pytest/fixtures.py in FixtureFunctionMarker()
842 class FixtureFunctionMarker(object):
843 scope = attr.ib()
--> 844 params = attr.ib(convert=attr.converters.optional(tuple))
845 autouse = attr.ib(default=False)
846 ids = attr.ib(default=None, convert=_ensure_immutable_ids)
TypeError: attrib() got an unexpected keyword argument 'convert'
The text was updated successfully, but these errors were encountered:
I couldn't reproduce the issue: I'm able to import word_tokenize both from the terminal and from Jupyter. Are you using the same environment (e.g. the same environment created with virtualenv) for both of your attempts?
I've been getting this error recently. I have used nltk before without any problem. Why is this happening?
I am using jupyter (python3) notebook. However, if I import nltk on terminal, there is no error.
The text was updated successfully, but these errors were encountered: