Add MV-LSTM config / Combination2D block / K-max pooling block #77

Open
wants to merge 40 commits into base: master
Changes from all commits

Commits (40)
0bb5e88
Create mv_lstm.json
fareise Jul 10, 2019
567f755
Create Combination2D
fareise Jul 10, 2019
4a4b12c
Create PoolingKmax2D
fareise Jul 10, 2019
50f8ca0
Rename PoolingKmax2D to PoolingKmax2D.py
fareise Jul 10, 2019
91bbe51
Rename Combination2D to Combination2D.py
fareise Jul 10, 2019
9f250b4
Update mv_lstm.json
fareise Jul 10, 2019
17f8714
Update PoolingKmax2D.py
fareise Jul 10, 2019
a6f3da1
Update PoolingKmax2D.py
fareise Jul 16, 2019
7ea6dd2
Update Combination2D.py
fareise Jul 16, 2019
1ab7924
Update PoolingKmax2D.py
fareise Jul 16, 2019
3d95f73
Update PoolingKmax2D.py
fareise Jul 16, 2019
98d8b99
Update PoolingKmax2D.py
fareise Jul 16, 2019
9648630
Update Combination2D.py
fareise Jul 17, 2019
6df09a9
Update Combination2D.py
fareise Jul 17, 2019
862964f
Update PoolingKmax2D.py
fareise Jul 17, 2019
1a1ddde
Update PoolingKmax2D.py
fareise Jul 21, 2019
51b9127
Update Combination2D.py
fareise Jul 21, 2019
232b761
Update PoolingKmax2D.py
fareise Jul 21, 2019
da7cac5
Update Combination2D.py
fareise Jul 21, 2019
8145a58
Update PoolingKmax2D.py
fareise Jul 23, 2019
b4fcbc2
Update Combination2D.py
fareise Jul 23, 2019
d288752
Update mv_lstm.json
fareise Jul 23, 2019
05a5cc5
Update Combination2D.py
fareise Jul 23, 2019
e797290
Update Combination2D.py
fareise Jul 24, 2019
663fb47
Update PoolingKmax2D.py
fareise Jul 25, 2019
bf96678
Update Combination2D.py
fareise Jul 25, 2019
21d6dd8
Update Combination2D.py
fareise Jul 25, 2019
85d3cc9
Merge pull request #1 from microsoft/master
fareise Jul 28, 2019
031e9cc
Delete Expand_plus.py
fareise Jul 28, 2019
2eef322
Update conf_question_answer_matching_arcii.json
fareise Jul 28, 2019
a1df749
Create conf_question_answer_matching_mvlstm.json
fareise Jul 28, 2019
be94c7d
Update conf_question_answer_matching_mvlstm.json
fareise Jul 29, 2019
1aa64c9
Update __init__.py
fareise Jul 29, 2019
83d879d
Update __init__.py
fareise Jul 29, 2019
31cade7
Update __init__.py
fareise Jul 29, 2019
9b97d73
Update __init__.py
fareise Jul 29, 2019
f6ba77a
Delete mv_lstm.json
fareise Jul 29, 2019
fcc44c4
Finetune MV-LSTM and get 0.7736 AUC
fareise Aug 6, 2019
7244460
add mv lstm result
fareise Aug 6, 2019
725812d
add mv lstm result
fareise Aug 6, 2019
1 change: 1 addition & 0 deletions Tutorial.md
@@ -303,6 +303,7 @@ Question answer matching is a crucial subtask of the question answering problem,
[ARC-I](https://arxiv.org/abs/1503.03244) (NeuronBlocks) | 0.7508
[ARC-II](https://arxiv.org/abs/1503.03244) (NeuronBlocks) | 0.7612
[MatchPyramid](https://arxiv.org/abs/1602.06359) (NeuronBlocks) | 0.763
[MV-LSTM](https://arxiv.org/abs/1511.08277) (NeuronBlocks) | 0.774
BiLSTM+Match Attention (NeuronBlocks) | 0.786


1 change: 1 addition & 0 deletions Tutorial_zh_CN.md
@@ -292,6 +292,7 @@ Question answer matching is a crucial subtask of the question answering problem,
[ARC-I](https://arxiv.org/abs/1503.03244) (NeuronBlocks) | 0.7508
[ARC-II](https://arxiv.org/abs/1503.03244) (NeuronBlocks) | 0.7612
[MatchPyramid](https://arxiv.org/abs/1602.06359) (NeuronBlocks) | 0.763
[MV-LSTM](https://arxiv.org/abs/1511.08277) (NeuronBlocks) | 0.774
BiLSTM+Match Attention (NeuronBlocks) | 0.786


73 changes: 73 additions & 0 deletions block_zoo/PoolingKmax2D.py
@@ -0,0 +1,73 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.

import torch
import torch.nn as nn
import torch.nn.functional as F

import numpy as np

from block_zoo.BaseLayer import BaseLayer, BaseConf
from utils.DocInherit import DocInherit


class PoolingKmax2DConf(BaseConf):
    """ Configuration of 2D k-max pooling

    Args:
        pool_type (str): 'max', default is 'max'.
        k (int): how many elements to retain per feature channel.
    """
    def __init__(self, **kwargs):
        super(PoolingKmax2DConf, self).__init__(**kwargs)

    @DocInherit
    def default(self):
        self.pool_type = 'max'  # Supported: ['max']
        self.k = 50

    @DocInherit
    def declare(self):
        self.num_of_inputs = 1
        self.input_ranks = [4]

    @DocInherit
    def inference(self):
        self.output_dim = [self.input_dims[0][0], self.input_dims[0][3] * self.k]
        self.output_rank = len(self.output_dim)

    @DocInherit
    def verify(self):
        super(PoolingKmax2DConf, self).verify()
        necessary_attrs_for_user = ['pool_type']
        for attr in necessary_attrs_for_user:
            self.add_attr_exist_assertion_for_user(attr)
        self.add_attr_value_assertion('pool_type', ['max'])

        assert all([input_rank == 4 for input_rank in self.input_ranks]), "Cannot apply a pooling layer on a tensor whose rank is not 4. A rank-4 tensor is expected, e.g. [batch size, length, width, feature]"
        assert self.output_dim[-1] != -1, "The input shape is %s; the input channel number of the pooling layer should not be -1." % (str(self.input_dims[0]))


class PoolingKmax2D(BaseLayer):
    """ 2D k-max pooling layer

    Args:
        layer_conf (PoolingKmax2DConf): configuration of a layer
    """
    def __init__(self, layer_conf):
        super(PoolingKmax2D, self).__init__(layer_conf)
        self.k = layer_conf.k

    def forward(self, string, string_len=None):
        """ process inputs

        Args:
            string (Tensor): tensor with shape: [batch_size, length, width, feature_dim]
            string_len (Tensor): [batch_size], default is None.

        Returns:
            Tensor: k-max pooling result of string, with shape [batch_size, feature_dim * k]
        """
        string = string.permute(0, 3, 1, 2)                            # [batch_size, feature_dim, length, width]
        string = string.view(string.size()[0], string.size()[1], -1)   # flatten the two spatial dims
        index = string.topk(self.k, dim=-1)[1].sort(dim=-1)[0]         # indices of the k largest values, kept in original order
        string = string.gather(-1, index)                              # [batch_size, feature_dim, k]
        string = string.view(string.size()[0], -1)                     # [batch_size, feature_dim * k]

        return string, string_len
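For context, the block's core computation can be sketched standalone as below; the `kmax_pooling_2d` name and the toy tensor sizes are illustrative only, not part of this PR:

```python
import torch

def kmax_pooling_2d(x, k):
    """Flatten the two spatial dims and keep the k largest values per feature channel."""
    x = x.permute(0, 3, 1, 2)                      # [batch, length, width, feat] -> [batch, feat, length, width]
    x = x.reshape(x.size(0), x.size(1), -1)        # [batch, feat, length * width]
    index = x.topk(k, dim=-1)[1].sort(dim=-1)[0]   # indices of the k largest values, kept in original order
    return x.gather(-1, index).reshape(x.size(0), -1)  # [batch, feat * k]

match_tensor = torch.randn(2, 32, 40, 3)           # e.g. [batch, len1, len2, interaction channels]
print(kmax_pooling_2d(match_tensor, k=50).shape)   # torch.Size([2, 150])
```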
1 change: 1 addition & 0 deletions block_zoo/__init__.py
@@ -12,6 +12,7 @@
from .Conv import Conv, ConvConf
from .Pooling import Pooling, PoolingConf
from .ConvPooling import ConvPooling, ConvPoolingConf
from .PoolingKmax2D import PoolingKmax2D, PoolingKmax2DConf

from .Dropout import Dropout, DropoutConf

103 changes: 103 additions & 0 deletions block_zoo/op/Combination2D.py
@@ -0,0 +1,103 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

import numpy as np
import logging

from block_zoo.BaseLayer import BaseConf
from utils.DocInherit import DocInherit
from utils.exceptions import ConfigurationError
import copy


class Combination2DConf(BaseConf):
    """ Configuration of the Combination2D layer

    Args:
        operations (list): a subset of ["dot", "bilinear", "add"].
    """
    def __init__(self, **kwargs):
        super(Combination2DConf, self).__init__(**kwargs)

    @DocInherit
    def default(self):
        self.operations = ["dot", "bilinear", "add"]

    @DocInherit
    def declare(self):
        self.num_of_inputs = -1
        self.input_ranks = [-1]

    @DocInherit
    def inference(self):
        self.output_dim = [self.input_dims[0][0], self.input_dims[0][1], self.input_dims[1][1], len(self.operations)]
        if "add" in self.operations:
            # the "add" operation contributes input_dims[0][-1] channels instead of a single one
            self.output_dim[-1] = self.output_dim[-1] + self.input_dims[0][-1] - 1

        super(Combination2DConf, self).inference()

    @DocInherit
    def verify(self):
        super(Combination2DConf, self).verify()

        # check that the ranks of all the inputs are equal
        rank_equal_flag = True
        for i in range(len(self.input_ranks)):
            if self.input_ranks[i] != self.input_ranks[0]:
                rank_equal_flag = False
                break
        if not rank_equal_flag:
            raise ConfigurationError("For layer Combination2D, the ranks of all inputs should be consistent!")


class Combination2D(nn.Module):
    """ Combination2D layer to merge the representations of two sequences

    Args:
        layer_conf (Combination2DConf): configuration of a layer
    """
    def __init__(self, layer_conf):
        super(Combination2D, self).__init__()
        self.layer_conf = layer_conf

        self.weight_bilinear = torch.nn.Linear(self.layer_conf.input_dims[0][-1], self.layer_conf.input_dims[0][-1])

        logging.warning("The length returned by the Combination2D layer is the length of the first input")

    def forward(self, *args):
        """ process inputs

        Args:
            args (list): [string, string_len, string2, string2_len, ...]
                e.g. string (Variable): [batch_size, seq_len, dim], string_len (ndarray): [batch_size]

        Returns:
            Variable: [batch_size, len1, len2, output_channels], together with the length of the first input
        """
        result = []
        if "dot" in self.layer_conf.operations:
            string1 = args[0]
            string2 = args[2]
            # dot-product interaction: [batch_size, len1, len2]
            result_multiply = torch.matmul(string1, string2.transpose(1, 2))
            result.append(torch.unsqueeze(result_multiply, 3))

        if "bilinear" in self.layer_conf.operations:
            string1 = args[0]
            string2 = args[2]
            # bilinear interaction: project string1 first, then take the dot product
            string1 = self.weight_bilinear(string1)
            result_multiply = torch.matmul(string1, string2.transpose(1, 2))
            result.append(torch.unsqueeze(result_multiply, 3))

        if "add" in self.layer_conf.operations:
            string1 = args[0]
            string2 = args[2]
            x_new = torch.stack([string1] * string2.size()[1], 2)  # [batch_size, x_max_len, y_max_len, dim]
            y_new = torch.stack([string2] * string1.size()[1], 1)  # [batch_size, x_max_len, y_max_len, dim]
            result.append(x_new + y_new)

        return torch.cat(result, 3), args[1]
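For reviewers, here is a minimal standalone sketch of the three interaction channels the layer concatenates; the variable names and toy shapes are illustrative, not part of the PR:

```python
import torch

batch, len1, len2, dim = 2, 5, 7, 16
s1 = torch.randn(batch, len1, dim)        # representation of the first sequence
s2 = torch.randn(batch, len2, dim)        # representation of the second sequence
bilinear = torch.nn.Linear(dim, dim)      # stands in for self.weight_bilinear

channels = []
# "dot": plain dot-product interaction -> [batch, len1, len2, 1]
channels.append(torch.matmul(s1, s2.transpose(1, 2)).unsqueeze(3))
# "bilinear": project s1 first, then dot product -> [batch, len1, len2, 1]
channels.append(torch.matmul(bilinear(s1), s2.transpose(1, 2)).unsqueeze(3))
# "add": broadcast every position pair and add -> [batch, len1, len2, dim]
channels.append(torch.stack([s1] * len2, 2) + torch.stack([s2] * len1, 1))

print(torch.cat(channels, 3).shape)       # torch.Size([2, 5, 7, 18]) = dot(1) + bilinear(1) + add(dim)
```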
76 changes: 0 additions & 76 deletions block_zoo/op/Expand_plus.py

This file was deleted.

4 changes: 2 additions & 2 deletions block_zoo/op/__init__.py
@@ -3,7 +3,7 @@
from .Concat2D import Concat2D, Concat2DConf
from .Concat3D import Concat3D, Concat3DConf
from .Combination import Combination, CombinationConf
from .Combination2D import Combination2D, Combination2DConf
from .Match import Match, MatchConf
from .Flatten import Flatten, FlattenConf
from .Expand_plus import Expand_plus, Expand_plusConf
from .CalculateDistance import CalculateDistance, CalculateDistanceConf
from .CalculateDistance import CalculateDistance, CalculateDistanceConf
conf_question_answer_matching_arcii.json
@@ -114,8 +114,9 @@
},
{
"layer_id": "match",
"layer": "Expand_plus",
"layer": "Combination2D",
"conf": {
"operations": ["add"]
},
"inputs": ["s1_conv_1", "s2_conv_1"]
},
@@ -209,4 +210,4 @@
]
},
"metrics": ["auc", "accuracy"]
}
}
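Since the match layer above now restricts `operations` to `["add"]`, its output keeps the conv channel dimension; a quick shape check under assumed toy sizes (names are illustrative):

```python
import torch

batch, len1, len2, channels = 2, 30, 30, 64    # toy sizes; the real ones come from the conv layers
s1_conv_1 = torch.randn(batch, len1, channels)
s2_conv_1 = torch.randn(batch, len2, channels)

# the "add" channel: broadcast-add every position pair of the two conv outputs
match = torch.stack([s1_conv_1] * len2, 2) + torch.stack([s2_conv_1] * len1, 1)
print(match.shape)                              # torch.Size([2, 30, 30, 64])
```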