
Add self attention option to conv_encoder and conv_decoder #1291

Closed
21 changes: 20 additions & 1 deletion pytext/models/seq_models/base.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-from typing import Dict, Optional
+from typing import Dict, Optional, Tuple
 
 from pytext.models.module import Module
 from pytext.utils.usage import log_class_usage
@@ -54,3 +54,22 @@ def reorder_incremental_state(
 class PlaceholderIdentity(nn.Module):
     def forward(self, x, incremental_state: Optional[Dict[str, Tensor]] = None):
         return x
+
+
+class PlaceholderAttentionIdentity(nn.Module):
+    def forward(
+        self,
+        query,
+        key,
+        value,
+        need_weights: Optional[bool] = None,
+        key_padding_mask: Optional[Tensor] = None,
+        incremental_state: Optional[Dict[str, Tensor]] = None,
+    ) -> Tuple[Tensor, Optional[Tensor]]:
+        optional_attention: Optional[Tensor] = None
+        return query, optional_attention
+
+    def reorder_incremental_state(
+        self, incremental_state: Dict[str, Tensor], new_order: Tensor
+    ):
+        pass
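
The added PlaceholderAttentionIdentity mirrors the call signature of a multi-head attention module but returns the query unchanged with None for the attention weights, so an encoder or decoder layer can call its attention slot unconditionally and expose one consistent module interface whether or not the self attention option is enabled. Below is a minimal sketch of that pattern; the ConvDecoderLayerSketch class, its constructor arguments, and the wiring are illustrative assumptions, not the PR's actual conv_encoder/conv_decoder code.

import torch
import torch.nn as nn

from pytext.models.seq_models.base import PlaceholderAttentionIdentity


class ConvDecoderLayerSketch(nn.Module):
    """Hypothetical layer showing how the placeholder slots in for attention."""

    def __init__(self, embed_dim: int, num_heads: int, self_attention: bool):
        super().__init__()
        if self_attention:
            self.self_attn = nn.MultiheadAttention(embed_dim, num_heads)
        else:
            # No-op stand-in with the same (query, key, value, ...) signature;
            # it returns the query unchanged and None for the weights.
            self.self_attn = PlaceholderAttentionIdentity()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Both modules return an (output, weights) tuple, so the forward
        # pass needs no branching on the self_attention flag.
        attn_out, _ = self.self_attn(x, x, x)
        return attn_out

Usage under the same assumptions: with self_attention=False the layer is an identity over its input.

layer = ConvDecoderLayerSketch(embed_dim=8, num_heads=2, self_attention=False)
x = torch.randn(5, 1, 8)  # (seq_len, batch, embed_dim)
y = layer(x)              # equal to x: the placeholder passes the query through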