Rename "Trasformer" to "Transformer"; Python 3 syntax updates (#1146)
* Rename "Trasformer" to "Transformer"

* Remove Python-2 style 'super' syntax

* Remove unneeded extension of object

* Update optimization_modules.py
nimz committed Apr 23, 2021
1 parent a71abb2 commit 45dc644
Showing 44 changed files with 121 additions and 121 deletions.
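
The "Remove Python-2 style 'super' syntax" and "Remove unneeded extension of object" items in the commit message are a mechanical rewrite repeated across the files listed below. The sketch that follows is not taken from the diff; BaseDataset, OldStyle, and NewStyle are hypothetical names used only to illustrate the before/after pattern under Python 3.

class BaseDataset:
    """Stand-in for a Ludwig-style dataset base class (hypothetical)."""
    def process_downloaded_dataset(self, header="infer"):
        return f"processed with header={header}"


# Python 2 style: explicit arguments to super() and an explicit 'object' base.
class OldStyle(BaseDataset, object):
    def process_downloaded_dataset(self):
        return super(OldStyle, self).process_downloaded_dataset(header=None)


# Python 3 style: zero-argument super() and an implicit 'object' base.
class NewStyle(BaseDataset):
    def process_downloaded_dataset(self):
        return super().process_downloaded_dataset(header=None)


# Both forms behave identically under Python 3; only the syntax changes.
assert OldStyle().process_downloaded_dataset() == NewStyle().process_downloaded_dataset()

The "Trasformer" to "Transformer" rename lands in ludwig/encoders/sequence_encoders.py below, where both the import line and the constructor call site switch to the corrected TransformerStack name.
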
ludwig/cli.py (2 changes: 1 addition & 1 deletion)
@@ -22,7 +22,7 @@
ludwig.contrib.contrib_import()


- class CLI(object):
+ class CLI:
    """CLI describes a command line interface for interacting with Ludwig, there
    are several different functions that can be performed. These functions are:
    - experiment - run an experiment using ludwig

ludwig/datasets/amazon_review_polarity/__init__.py (2 changes: 1 addition & 1 deletion)
@@ -40,7 +40,7 @@ def __init__(self, cache_dir=DEFAULT_CACHE_LOCATION):
super().__init__(dataset_name="amazon_review_polarity", cache_dir=cache_dir)

def process_downloaded_dataset(self):
- super(AmazonPolarity, self).process_downloaded_dataset(header=None)
+ super().process_downloaded_dataset(header=None)
processed_df = pd.read_csv(os.path.join(self.processed_dataset_path,
self.csv_filename))
processed_df.columns = ['label', 'review_tile', 'review_text', 'split']

ludwig/datasets/amazon_reviews/__init__.py (2 changes: 1 addition & 1 deletion)
@@ -40,7 +40,7 @@ def __init__(self, cache_dir=DEFAULT_CACHE_LOCATION):
super().__init__(dataset_name="amazon_reviews", cache_dir=cache_dir)

def process_downloaded_dataset(self):
- super(AmazonReviews, self).process_downloaded_dataset(header=None)
+ super().process_downloaded_dataset(header=None)
processed_df = pd.read_csv(os.path.join(self.processed_dataset_path,
self.csv_filename))
processed_df.columns = ['label', 'review_tile', 'review_text', 'split']

ludwig/datasets/dbpedia/__init__.py (2 changes: 1 addition & 1 deletion)
@@ -38,7 +38,7 @@ def __init__(self, cache_dir=DEFAULT_CACHE_LOCATION):
super().__init__(dataset_name="dbpedia", cache_dir=cache_dir)

def process_downloaded_dataset(self):
- super(DBPedia, self).process_downloaded_dataset(header=None)
+ super().process_downloaded_dataset(header=None)
processed_df = pd.read_csv(os.path.join(self.processed_dataset_path,
self.csv_filename))
processed_df.columns = ['label', 'title', 'content', 'split']

ludwig/datasets/ethos_binary/__init__.py (2 changes: 1 addition & 1 deletion)
@@ -36,7 +36,7 @@ def __init__(self, cache_dir=DEFAULT_CACHE_LOCATION):
super().__init__(dataset_name="ethos_binary", cache_dir=cache_dir)

def process_downloaded_dataset(self):
- super(EthosBinary, self).process_downloaded_dataset()
+ super().process_downloaded_dataset()
# replace ; sperator to ,
processed_df = pd.read_csv(os.path.join(self.processed_dataset_path,
self.csv_filename), sep=";")

ludwig/datasets/goemotions/__init__.py (2 changes: 1 addition & 1 deletion)
@@ -43,7 +43,7 @@ def read_file(self, filetype, filename):
return file_df

def process_downloaded_dataset(self):
- super(GoEmotions, self).process_downloaded_dataset()
+ super().process_downloaded_dataset()
# format emotion ids to be a set of emotion ids vs. string
processed_df = pd.read_csv(os.path.join(self.processed_dataset_path,
self.csv_filename))

ludwig/datasets/sst2/sst_utils.py (2 changes: 1 addition & 1 deletion)
@@ -173,7 +173,7 @@ def process_downloaded_dataset(self):
index=False
)

- super(SST, self).process_downloaded_dataset()
+ super().process_downloaded_dataset()


def format_text(text: str):

ludwig/datasets/yahoo_answers/__init__.py (2 changes: 1 addition & 1 deletion)
@@ -40,7 +40,7 @@ def __init__(self, cache_dir=DEFAULT_CACHE_LOCATION):
super().__init__(dataset_name="yahoo_answers", cache_dir=cache_dir)

def process_downloaded_dataset(self):
- super(YahooAnswers, self).process_downloaded_dataset(header=None)
+ super().process_downloaded_dataset(header=None)
processed_df = pd.read_csv(os.path.join(self.processed_dataset_path,
self.csv_filename))
processed_df.columns = ['label', 'question_title', 'question', 'best_answer', 'split']

ludwig/datasets/yelp_review_polarity/__init__.py (2 changes: 1 addition & 1 deletion)
@@ -39,7 +39,7 @@ def __init__(self, cache_dir=DEFAULT_CACHE_LOCATION):
super().__init__(dataset_name="yelp_polarity", cache_dir=cache_dir)

def process_downloaded_dataset(self):
- super(YelpPolarity, self).process_downloaded_dataset(header=None)
+ super().process_downloaded_dataset(header=None)
processed_df = pd.read_csv(os.path.join(self.processed_dataset_path,
self.csv_filename))
processed_df.columns = ['label', 'text', 'split']

ludwig/datasets/yelp_reviews/__init__.py (2 changes: 1 addition & 1 deletion)
@@ -39,7 +39,7 @@ def __init__(self, cache_dir=DEFAULT_CACHE_LOCATION):
super().__init__(dataset_name="yelp_reviews", cache_dir=cache_dir)

def process_downloaded_dataset(self):
- super(YelpReviews, self).process_downloaded_dataset(header=None)
+ super().process_downloaded_dataset(header=None)
processed_df = pd.read_csv(os.path.join(self.processed_dataset_path,
self.csv_filename))
processed_df.columns = ['label', 'text', 'split']

ludwig/decoders/sequence_decoders.py (4 changes: 2 additions & 2 deletions)
@@ -78,7 +78,7 @@ def __init__(
reduce_input='sum',
**kwargs
):
- super(SequenceGeneratorDecoder, self).__init__()
+ super().__init__()
logger.debug(' {}'.format(self.name))

self.cell_type = cell_type
@@ -720,7 +720,7 @@ def __init__(
is_timeseries=False,
**kwargs
):
- super(SequenceTaggerDecoder, self).__init__()
+ super().__init__()
logger.debug(' {}'.format(self.name))

self.attention = attention

ludwig/encoders/bag_encoders.py (2 changes: 1 addition & 1 deletion)
@@ -62,7 +62,7 @@ def __init__(
dropout=0.0,
**kwargs
):
- super(BagEmbedWeightedEncoder, self).__init__()
+ super().__init__()
logger.debug(' {}'.format(self.name))

logger.debug(' EmbedWeighted')

ludwig/encoders/binary_encoders.py (2 changes: 1 addition & 1 deletion)
@@ -45,7 +45,7 @@ def __init__(
self,
**kwargs
):
- super(BinaryPassthroughEncoder, self).__init__()
+ super().__init__()
logger.debug(' {}'.format(self.name))

def call(self, inputs, training=None, mask=None):

ludwig/encoders/category_encoders.py (4 changes: 2 additions & 2 deletions)
@@ -51,7 +51,7 @@ def __init__(
embedding_regularizer=None,
**kwargs
):
- super(CategoricalEmbedEncoder, self).__init__()
+ super().__init__()
logger.debug(' {}'.format(self.name))

logger.debug(' Embed')
@@ -96,7 +96,7 @@ def __init__(
embedding_regularizer=None,
**kwargs
):
- super(CategoricalSparseEncoder, self).__init__()
+ super().__init__()
logger.debug(' {}'.format(self.name))

logger.debug(' Embed')

ludwig/encoders/date_encoders.py (4 changes: 2 additions & 2 deletions)
@@ -109,7 +109,7 @@ def __init__(
:type dropout: float
"""
- super(DateEmbed, self).__init__()
+ super().__init__()
logger.debug(' {}'.format(self.name))

logger.debug(' year FCStack')
@@ -379,7 +379,7 @@ def __init__(
returning the encoder output.
:type dropout: float
"""
- super(DateWave, self).__init__()
+ super().__init__()
logger.debug(' {}'.format(self.name))

logger.debug(' year FCStack')

ludwig/encoders/generic_encoders.py (4 changes: 2 additions & 2 deletions)
@@ -29,7 +29,7 @@ def __init__(
self,
**kwargs
):
- super(PassthroughEncoder, self).__init__()
+ super().__init__()
logger.debug(' {}'.format(self.name))

def call(self, inputs, training=None, mask=None):
@@ -61,7 +61,7 @@ def __init__(
dropout=0,
**kwargs
):
- super(DenseEncoder, self).__init__()
+ super().__init__()
logger.debug(' {}'.format(self.name))

logger.debug(' FCStack')

ludwig/encoders/h3_encoders.py (6 changes: 3 additions & 3 deletions)
@@ -102,7 +102,7 @@ def __init__(
is greater than 0).
:type regularize: Boolean
"""
- super(H3Embed, self).__init__()
+ super().__init__()
logger.debug(' {}'.format(self.name))

self.embedding_size = embedding_size
@@ -333,7 +333,7 @@ def __init__(
is greater than 0).
:type regularize: Boolean
"""
- super(H3WeightedSum, self).__init__()
+ super().__init__()
logger.debug(' {}'.format(self.name))

self.should_softmax = should_softmax
@@ -540,7 +540,7 @@ def __init__(
(which does not reduce and returns the full tensor).
:type reduce_output: str
"""
- super(H3RNN, self).__init__()
+ super().__init__()
logger.debug(' {}'.format(self.name))

self.embedding_size = embedding_size

ludwig/encoders/image_encoders.py (4 changes: 2 additions & 2 deletions)
@@ -83,7 +83,7 @@ def __init__(
fc_dropout=0,
**kwargs
):
- super(Stacked2DCNN, self).__init__()
+ super().__init__()

logger.debug(' {}'.format(self.name))

@@ -183,7 +183,7 @@ def __init__(
dropout=0,
**kwargs
):
- super(ResNetEncoder, self).__init__()
+ super().__init__()
logger.debug(' {}'.format(self.name))

if resnet_size < 50:

ludwig/encoders/sequence_encoders.py (20 changes: 10 additions & 10 deletions)
@@ -22,7 +22,7 @@

from ludwig.encoders.base import Encoder
from ludwig.utils.registry import Registry, register, register_default
- from ludwig.modules.attention_modules import TrasformerStack
+ from ludwig.modules.attention_modules import TransformerStack
from ludwig.modules.convolutional_modules import Conv1DStack, \
ParallelConv1DStack, ParallelConv1D
from ludwig.modules.embedding_modules import EmbedSequence, \
@@ -61,7 +61,7 @@ def __init__(
and returns the full tensor).
:type reduce_output: str
"""
- super(SequencePassthroughEncoder, self).__init__()
+ super().__init__()
logger.debug(' {}'.format(self.name))

self.reduce_output = reduce_output
@@ -189,7 +189,7 @@ def __init__(
:type dropout: Tensor
"""
- super(SequenceEmbedEncoder, self).__init__()
+ super().__init__()
logger.debug(' {}'.format(self.name))

self.reduce_output = reduce_output
@@ -401,7 +401,7 @@ def __init__(
(which does not reduce and returns the full tensor).
:type reduce_output: str
"""
- super(ParallelCNN, self).__init__()
+ super().__init__()
logger.debug(' {}'.format(self.name))

if conv_layers is not None and num_conv_layers is None:
@@ -723,7 +723,7 @@ def __init__(
(which does not reduce and returns the full tensor).
:type reduce_output: str
"""
- super(StackedCNN, self).__init__()
+ super().__init__()
logger.debug(' {}'.format(self.name))

if conv_layers is not None and num_conv_layers is None:
@@ -1083,7 +1083,7 @@ def __init__(
(which does not reduce and returns the full tensor).
:type reduce_output: str
"""
- super(StackedParallelCNN, self).__init__()
+ super().__init__()
logger.debug(' {}'.format(self.name))

if stacked_layers is not None and num_stacked_layers is None:
@@ -1412,7 +1412,7 @@ def __init__(
(which does not reduce and returns the full tensor).
:type reduce_output: str
"""
- super(StackedRNN, self).__init__()
+ super().__init__()
logger.debug(' {}'.format(self.name))

self.reduce_output = reduce_output
@@ -1676,7 +1676,7 @@ def __init__(
(which does not reduce and returns the full tensor).
:type reduce_output: str
"""
- super(StackedCNNRNN, self).__init__()
+ super().__init__()
logger.debug(' {}'.format(self.name))

if conv_layers is not None and num_conv_layers is None:
@@ -2005,7 +2005,7 @@ def __init__(
(which does not reduce and returns the full tensor).
:type reduce_output: str
"""
- super(StackedTransformer, self).__init__()
+ super().__init__()
logger.debug(' {}'.format(self.name))

self.reduce_output = reduce_output
@@ -2042,7 +2042,7 @@ def __init__(
self.should_project = True

logger.debug(' TransformerStack')
- self.transformer_stack = TrasformerStack(
+ self.transformer_stack = TransformerStack(
hidden_size=hidden_size,
num_heads=num_heads,
fc_size=transformer_fc_size,

ludwig/encoders/set_encoders.py (2 changes: 1 addition & 1 deletion)
@@ -63,7 +63,7 @@ def __init__(
reduce_output='sum',
**kwargs
):
- super(SetSparseEncoder, self).__init__()
+ super().__init__()
logger.debug(' {}'.format(self.name))

logger.debug(' EmbedSparse')

(remaining changed files not loaded)
