# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math

import torch
import torch.nn as nn

class PositionalEncoding(nn.Module):
    """Positional encoding.

    Args:
        d_model: Embedding dimension.
        dropout_rate: Dropout rate.
        max_len: Maximum input length.
        reverse: Whether to reverse the input position.
    """

    def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False):
        """Construct a PositionalEncoding object."""
        super(PositionalEncoding, self).__init__()
        self.d_model = d_model
        self.reverse = reverse
        self.xscale = math.sqrt(self.d_model)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.pe = None
        self.extend_pe(torch.tensor(0.0).expand(1, max_len))

    def extend_pe(self, x):
        """Reset the positional encodings."""
        if self.pe is not None:
            if self.pe.size(1) >= x.size(1):
                if self.pe.dtype != x.dtype or self.pe.device != x.device:
                    self.pe = self.pe.to(dtype=x.dtype, device=x.device)
                return
        pe = torch.zeros(x.size(1), self.d_model)
        if self.reverse:
            position = torch.arange(
                x.size(1) - 1, -1, -1.0, dtype=torch.float32
            ).unsqueeze(1)
        else:
            position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, self.d_model, 2, dtype=torch.float32)
            * -(math.log(10000.0) / self.d_model)
        )
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.pe = pe.to(device=x.device, dtype=x.dtype)

    def forward(self, x: torch.Tensor):
        """Add positional encoding.

        Args:
            x (torch.Tensor): Input tensor B X T X C.

        Returns:
            torch.Tensor: Encoded tensor B X T X C.
        """
        self.extend_pe(x)
        x = x * self.xscale + self.pe[:, : x.size(1)]
        return self.dropout(x)
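
# Usage sketch (editor's addition, not part of the original fairseq module):
# a minimal, hypothetical example of applying the absolute PositionalEncoding
# above to a batch-first (B x T x C) tensor. The batch size, sequence length,
# and dropout rate below are arbitrary illustrative values.
if __name__ == "__main__":
    _abs_enc = PositionalEncoding(d_model=256, dropout_rate=0.1)
    _x = torch.randn(4, 50, 256)  # B x T x C
    _y = _abs_enc(_x)  # input is scaled by sqrt(d_model) before the encoding is added
    assert _y.shape == (4, 50, 256)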


class RelPositionalEncoding(nn.Module):
    """Relative positional encoding module (new implementation).

    Args:
        max_len: Maximum input length.
        d_model: Embedding dimension.
    """

    def __init__(self, max_len, d_model):
        """Construct a RelPositionalEncoding object."""
        super(RelPositionalEncoding, self).__init__()
        self.d_model = d_model
        self.pe = None
        self.extend_pe(torch.tensor(0.0).expand(1, max_len))

    def extend_pe(self, x):
        """Reset the positional encodings."""
        if self.pe is not None:
            # self.pe contains both positive and negative parts;
            # its length is 2 * input_len - 1.
            if self.pe.size(1) >= x.size(1) * 2 - 1:
                if self.pe.dtype != x.dtype or self.pe.device != x.device:
                    self.pe = self.pe.to(dtype=x.dtype, device=x.device)
                return
        # Suppose `i` is the position of the query vector and `j` the position
        # of the key vector. We use positive relative positions when the key is
        # to the left (i > j) and negative relative positions otherwise (i < j).
        pe_positive = torch.zeros(x.size(1), self.d_model)
        pe_negative = torch.zeros(x.size(1), self.d_model)
        position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, self.d_model, 2, dtype=torch.float32)
            * -(math.log(10000.0) / self.d_model)
        )
        pe_positive[:, 0::2] = torch.sin(position * div_term)
        pe_positive[:, 1::2] = torch.cos(position * div_term)
        pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)
        pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)
        # Reverse the order of the positive indices and concatenate the positive
        # and negative parts. This supports the shifting trick described in
        # https://arxiv.org/abs/1901.02860
        pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)
        pe_negative = pe_negative[1:].unsqueeze(0)
        pe = torch.cat([pe_positive, pe_negative], dim=1)
        self.pe = pe.to(device=x.device, dtype=x.dtype)

    def forward(self, x: torch.Tensor):
        """Compute the relative positional embeddings for the input.

        Args:
            x: Input tensor T X B X C.

        Returns:
            torch.Tensor: Relative positional embedding tensor (2*T-1) X 1 X C.
        """
        x = x.transpose(0, 1)  # change TBC to BTC
        self.extend_pe(x)
        pos_emb = self.pe[
            :,
            self.pe.size(1) // 2 - x.size(1) + 1 : self.pe.size(1) // 2 + x.size(1),
        ]
        pos_emb = pos_emb.transpose(0, 1)  # change back to time-first layout
        return pos_emb
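
# Usage sketch (editor's addition, not part of the original fairseq module):
# a minimal, hypothetical example of querying RelPositionalEncoding with a
# time-first (T x B x C) tensor. Unlike PositionalEncoding, this module does
# not modify its input; it only returns the table of relative position
# embeddings, covering offsets T-1 down to -(T-1), for use by a
# relative-position attention layer.
if __name__ == "__main__":
    _rel_enc = RelPositionalEncoding(max_len=5000, d_model=256)
    _x = torch.randn(50, 4, 256)  # T x B x C
    _pos = _rel_enc(_x)  # relative position embeddings, one row per offset
    assert _pos.shape == (2 * 50 - 1, 1, 256)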