Fixed vector output feature
w4nderlust committed Aug 27, 2019
1 parent 95306c6 commit 80555fb
Showing 1 changed file with 17 additions and 33 deletions.
50 changes: 17 additions & 33 deletions
ludwig/features/vector_feature.py
@@ -20,6 +20,7 @@

import numpy as np
import tensorflow as tf
from ludwig.models.modules.initializer_modules import get_initializer

from ludwig.constants import *
from ludwig.features.base_feature import BaseFeature
@@ -179,16 +180,6 @@ def __init__(self, feature):

_ = self.overwrite_defaults(feature)

self.decoder = 'fc_stack'
feature['fc_size'] = self.vector_size
self.decoder_obj = self.get_vector_decoder(feature)

def get_vector_decoder(self, decoder_parameters):
return get_from_registry(
self.decoder, vector_decoder_registry)(
**decoder_parameters
)

def _get_output_placeholder(self):
return tf.placeholder(
tf.float32,
@@ -264,24 +255,18 @@ def build_output(
):
train_mean_loss, eval_loss, output_tensors = self.build_vector_output(
self._get_output_placeholder(),
self.decoder_obj,
hidden,
hidden_size,
regularizer=regularizer,
dropout_rate=dropout_rate,
is_training=is_training
)
return train_mean_loss, eval_loss, output_tensors

def build_vector_output(
self,
targets,
decoder,
hidden,
hidden_size,
regularizer=None,
dropout_rate=None,
is_training=None
):
feature_name = self.name
output_tensors = {}
@@ -291,12 +276,9 @@ def build_vector_output(

# ================ Predictions ================
logits, logits_size, predictions = self.vector_predictions(
decoder,
hidden,
hidden_size,
regularizer=regularizer,
dropout_rate=dropout_rate,
is_training=is_training
)

output_tensors[PREDICTIONS + '_' + feature_name] = predictions
@@ -343,28 +325,34 @@ def vector_predictions(

def vector_predictions(
self,
decoder,
hidden,
hidden_size,
regularizer=None,
dropout_rate=None,
is_training=None
):
with tf.variable_scope('predictions_{}'.format(self.name)):
logits, logits_size = decoder(
hidden,
hidden_size,
regularizer=regularizer,
dropout_rate=dropout_rate,
is_training=is_training
initializer_obj = get_initializer(self.initializer)
weights = tf.compat.v1.get_variable(
'weights',
initializer=initializer_obj([hidden_size, self.vector_size]),
regularizer=regularizer
)
logger.debug(' projection_weights: {0}'.format(weights))

biases = tf.compat.v1.get_variable(
'biases',
[self.vector_size]
)
logger.debug(' projection_biases: {0}'.format(biases))

logits = tf.matmul(hidden, weights) + biases
logger.debug(' logits: {0}'.format(logits))

if self.softmax:
predictions = tf.nn.softmax(logits)
else:
predictions = logits

return logits, logits_size, predictions
return logits, self.vector_size, predictions

default_validation_measure = LOSS

@@ -465,7 +453,3 @@ def populate_defaults(output_feature):
vector_encoder_registry = {
'fc_stack': Dense
}

vector_decoder_registry = {
'fc_stack': Dense
}
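
In effect, the diff above drops the registered fc_stack decoder (and the vector_decoder_registry) and projects the combiner's hidden representation straight to the fixed-size output vector with a single trainable weight matrix and bias, followed by an optional softmax. Below is a minimal, self-contained sketch of that projection in TF 1.x-style graph mode; it is not the Ludwig API, and names such as vector_projection and use_softmax are illustrative only.

import numpy as np
import tensorflow as tf

tf.compat.v1.disable_eager_execution()


def vector_projection(hidden, hidden_size, vector_size,
                      regularizer=None, use_softmax=False):
    # Single trainable weight matrix and bias projecting the hidden
    # representation directly to the fixed-size output vector.
    with tf.compat.v1.variable_scope('predictions_vector'):
        weights = tf.compat.v1.get_variable(
            'weights',
            shape=[hidden_size, vector_size],
            regularizer=regularizer
        )
        biases = tf.compat.v1.get_variable('biases', [vector_size])
        logits = tf.matmul(hidden, weights) + biases
        predictions = tf.nn.softmax(logits) if use_softmax else logits
    return logits, vector_size, predictions


# Usage sketch: project a batch of 4 hidden vectors of size 128 down to size 10.
hidden_ph = tf.compat.v1.placeholder(tf.float32, [None, 128])
logits, logits_size, preds = vector_projection(hidden_ph, 128, 10)

with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    out = sess.run(preds, {hidden_ph: np.random.rand(4, 128).astype(np.float32)})
    print(out.shape)  # (4, 10)

In the actual commit, the weight initialization stays configurable through get_initializer(self.initializer) and the logits size is taken from self.vector_size, which is why the decoder indirection and its registry could be removed.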
