diff --git a/configs/vqa_med_2019/c2_classification/c2_word_answer_onehot_bow.yml b/configs/vqa_med_2019/c2_classification/c2_word_answer_onehot_bow.yml
index 3733970..73dcce7 100644
--- a/configs/vqa_med_2019/c2_classification/c2_word_answer_onehot_bow.yml
+++ b/configs/vqa_med_2019/c2_classification/c2_word_answer_onehot_bow.yml
@@ -5,6 +5,8 @@ default_configs: vqa_med_2019/c2_classification/default_c2_classification.yml
 training:
   problem:
     batch_size: 128
+  terminal_conditions:
+    episode_limit: 1000
 
 # Validation parameters:
 validation:
@@ -49,7 +51,7 @@ pipeline:
   # Model.
   classifier:
     type: FeedForwardNetwork
-    hidden_sizes: [500, 500]
+    hidden_sizes: [50]
     dropout_rate: 0.5
     priority: 3
     streams:
diff --git a/configs/vqa_med_2019/c4_classification/c4_word_answer_onehot_bow.yml b/configs/vqa_med_2019/c4_classification/c4_word_answer_onehot_bow.yml
index 1db17e6..e19a039 100644
--- a/configs/vqa_med_2019/c4_classification/c4_word_answer_onehot_bow.yml
+++ b/configs/vqa_med_2019/c4_classification/c4_word_answer_onehot_bow.yml
@@ -4,8 +4,7 @@ default_configs: vqa_med_2019/c4_classification/default_c4_classification.yml
 # Training parameters:
 training:
   problem:
-    batch_size: 128 
-
+    batch_size: 128
 # Validation parameters:
 validation:
   problem:
diff --git a/ptp/components/problems/image_text_to_class/vqa_med_2019.py b/ptp/components/problems/image_text_to_class/vqa_med_2019.py
index 704b6c1..a67f8fe 100644
--- a/ptp/components/problems/image_text_to_class/vqa_med_2019.py
+++ b/ptp/components/problems/image_text_to_class/vqa_med_2019.py
@@ -448,7 +448,7 @@ def __getitem__(self, index):
         image_transformations_list = []
         # Optional.
         if 'random_affine' in self.image_preprocessing:
-            rotate = (-45, 135)
+            rotate = (-45, 80)
             translate = (0.05, 0.25)
             scale = (0.5, 2)
             image_transformations_list.append(transforms.RandomAffine(rotate, translate, scale))
@@ -511,9 +511,10 @@ def predict_yes_no(self, qtext):
         Determines whether this is binary (yes/no) type of question.
         """
         yes_no_starters = ['is','was','are','does']
-        tokens = qtext.split(' ')
-        first_token = tokens[0]
-        if first_token in yes_no_starters and ('or' not in tokens):
+        if 'tokenize' not in self.question_preprocessing:
+            qtext = qtext.split(' ')
+        first_token = qtext[0]
+        if first_token in yes_no_starters and ('or' not in qtext):
             return True
         return False
 